ignacioct committed on
Commit
8773ff3
β€’
1 Parent(s): 7f24bfc

recommitting all files

Browse files
.streamlit/config.toml ADDED
File without changes
DATASET_README_BASE.md ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Domain Dataset Grower
2
+
3
+ This dataset was generated by [distilabel](https://distilabel.argilla.io/latest/) as a domain specific dataset for the domain of farming. The dataset used this seed data to generate the samples. The seed data was defined by a domain expert and the generated data can be reviewed in this [Argilla](https://argilla.io/) space here: [Argilla](https://huggingface.co/spaces/argilla/farming)
4
+
5
+ If you want to define a domain specific seed dataset for your own domain, you can use the distilabel tool to generate the dataset, and seed your dataset [here](https://huggingface.co/spaces/argilla/domain-specific-seed).
6
+
README.md CHANGED
@@ -1,12 +1,13 @@
1
  ---
2
- title: Domain Specific Dataset Template
3
  emoji: πŸ’»
4
- colorFrom: indigo
5
- colorTo: green
6
  sdk: streamlit
7
  sdk_version: 1.33.0
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Domain Specific Seed
3
  emoji: πŸ’»
4
+ colorFrom: purple
5
+ colorTo: red
6
  sdk: streamlit
7
  sdk_version: 1.33.0
8
  app_file: app.py
9
  pinned: false
10
+ license: apache-2.0
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st

from defaults import (
    PROJECT_NAME,
    ARGILLA_SPACE_REPO_ID,
    DATASET_REPO_ID,
    ARGILLA_URL,
    PROJECT_SPACE_REPO_ID,
    DIBT_PARENT_APP_URL,
)
from utils import project_sidebar

st.set_page_config("Domain Data Grower", page_icon="πŸ§‘β€πŸŒΎ")

project_sidebar()

# PROJECT_NAME is read from project_config.json (written by the parent app).
# The placeholder "DEFAULT_DOMAIN" means the parent app has not been run yet,
# so none of the downstream steps can work.
if PROJECT_NAME == "DEFAULT_DOMAIN":
    st.warning(
        "Please set up the project configuration in the parent app before proceeding."
    )
    st.stop()


st.header("πŸ§‘β€πŸŒΎ Domain Data Grower")
st.divider()

st.markdown(
    """
    ## 🌱 Create a dataset seed for aligning models to a specific domain

    This app helps you create a dataset seed for building diverse domain-specific datasets for aligning models.
    Alignment datasets are used to fine-tune models to a specific domain or task, but as yet, there's a shortage of diverse datasets for this purpose.
    """
)
st.markdown(
    """
    ## 🚜 How it works

    You can create a dataset seed by defining the domain expertise, perspectives, topics, and examples for your domain-specific dataset.
    The dataset seed is then used to generate synthetic data for training a language model.

    """
)
st.markdown(
    """
    ## πŸ—ΊοΈ The process

    ### Step 1: ~~Setup the project~~

    ~~Define the project details, including the project name, domain, and API credentials. Create Dataset Repo on the Hub.~~
    """
)
# Step 1 is rendered struck-through on purpose: it is already done by the
# parent app this space was duplicated from.
st.link_button("πŸš€ ~~Setup Project via the parent app~~", DIBT_PARENT_APP_URL)

st.markdown(
    """
    ### Step 2: Describe the Domain

    Define the domain expertise, perspectives, topics, and examples for your domain-specific dataset.
    You can collaborate with domain experts to define the domain expertise and perspectives.
    """
)

st.page_link(
    "pages/2_πŸ‘©πŸΌβ€πŸ”¬ Describe Domain.py",
    label="Describe Domain",
    icon="πŸ‘©πŸΌβ€πŸ”¬",
)

st.markdown(
    """
    ### Step 3: Generate Synthetic Data

    Use distilabel to generate synthetic data for your domain-specific dataset.
    You can run the pipeline locally or in this space to generate synthetic data.
    """
)

st.page_link(
    "pages/3_🌱 Generate Dataset.py",
    label="Generate Dataset",
    icon="🌱",
)

st.markdown(
    """
    ### Step 4: Review the Dataset

    Use Argilla to review the generated synthetic data and provide feedback on the quality of the data.


    """
)
st.link_button("πŸ” Review the dataset in Argilla", ARGILLA_URL)
defaults.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json

# File locations and the source modules shipped alongside the generated
# dataset so the pipeline can be re-run remotely.
SEED_DATA_PATH = "seed_data.json"
PIPELINE_PATH = "pipeline.yaml"
REMOTE_CODE_PATHS = ["defaults.py", "domain.py", "pipeline.py"]
DIBT_PARENT_APP_URL = "https://argilla-domain-specific-datasets-welcome.hf.space/"
# How many seed perspectives/topics/examples the UI exposes by default.
N_PERSPECTIVES = 5
N_TOPICS = 5
N_EXAMPLES = 5

################################################
# DEFAULTS ON FARMING
################################################

# Seed data (domain, perspectives, topics, examples, system prompt) is read
# once at import time. The file ships with the space and is overwritten by
# the "Describe Domain" page.
with open(SEED_DATA_PATH) as f:
    DEFAULT_DATA = json.load(f)

DEFAULT_DOMAIN = DEFAULT_DATA["domain"]
DEFAULT_PERSPECTIVES = DEFAULT_DATA["perspectives"]
DEFAULT_TOPICS = DEFAULT_DATA["topics"]
DEFAULT_EXAMPLES = DEFAULT_DATA["examples"]
DEFAULT_SYSTEM_PROMPT = DEFAULT_DATA["domain_expert_prompt"]

################################################
# PROJECT CONFIG FROM PARENT APP
################################################

# project_config.json is written by the parent "welcome" app when the space
# is set up; importing this module fails if it is missing.
with open("project_config.json") as f:
    PROJECT_CONFIG = json.load(f)

PROJECT_NAME = PROJECT_CONFIG["project_name"]
ARGILLA_SPACE_REPO_ID = PROJECT_CONFIG["argilla_space_repo_id"]
DATASET_REPO_ID = PROJECT_CONFIG["dataset_repo_id"]
# Space repo ids map to *.hf.space subdomains with "/" and "_" flattened to "-".
ARGILLA_SPACE_NAME = ARGILLA_SPACE_REPO_ID.replace("/", "-").replace("_", "-")
ARGILLA_URL = f"https://{ARGILLA_SPACE_NAME}.hf.space"
PROJECT_SPACE_REPO_ID = PROJECT_CONFIG["project_space_repo_id"]
DATASET_URL = f"https://huggingface.co/datasets/{DATASET_REPO_ID}"
HUB_USERNAME = DATASET_REPO_ID.split("/")[0]
domain.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
from typing import Any, Dict, List

from distilabel.steps.tasks.typing import ChatType
from distilabel.steps.tasks.text_generation import TextGeneration
from distilabel.steps import StepInput, StepOutput, Step

from dotenv import load_dotenv

from defaults import (
    DEFAULT_DOMAIN,
    DEFAULT_PERSPECTIVES,
    DEFAULT_TOPICS,
    DEFAULT_EXAMPLES,
    DEFAULT_SYSTEM_PROMPT,
    N_PERSPECTIVES,
    N_TOPICS,
    N_EXAMPLES,
)

# Load HF/Argilla credentials from a local .env file if present.
load_dotenv()

# Application description used for SelfInstruct.
# (Fixed prompt typos: "than generates" -> "that generates" and
# "Your should not expect basic but profound questions" -> the intended
# "expect not basic but profound questions".)
APPLICATION_DESCRIPTION = f"""You are an AI assistant that generates queries around the domain of {DEFAULT_DOMAIN}.
You should expect not basic but profound questions from your users.
The queries should reflect a diversity of vision and economic positions and political positions.
The queries may know about different methods of {DEFAULT_DOMAIN}.
The queries can be positioned politically, economically, socially, or practically.
Also take into account the impact of diverse causes on diverse domains."""


# Trim the seed lists down to the sizes the UI and pipeline expect.
TOPICS = DEFAULT_TOPICS[:N_TOPICS]
PERSPECTIVES = DEFAULT_PERSPECTIVES[:N_PERSPECTIVES]
EXAMPLES = DEFAULT_EXAMPLES[:N_EXAMPLES]
36
+
37
def create_examples_template(examples: List[Dict[str, str]]) -> str:
    """Build the instruction template for the domain-expert task.

    Appends the seed examples' questions and answers after the base
    "{instruction}" placeholder so the LLM sees what high-quality Q&A
    look like for this domain.

    Args:
        examples: Seed examples, each a dict with "question" and "answer" keys.

    Returns:
        A format string containing an "{instruction}" placeholder followed by
        the rendered example questions and answers.
        (The original annotated the return as List[str] although a single
        template string is returned; the annotation is corrected here. The
        "the the" typo in the template text is also fixed.)
    """
    questions = """ Examples of high quality questions:"""
    answers = """ Examples of high quality answers:"""
    for example in examples:
        questions += f"""\n- Question: {example["question"]}\n"""
        answers += f"""\n- Answer: {example["answer"]}\n"""

    _template: str = (
        """{instruction}\nThis is the instruction.\n Examples: """
        + questions
        + answers
    )
    return _template
50
+
51
+
52
def create_topics(topics: List[str], positions: List[str]) -> List[str]:
    """Cross every topic with every perspective into a prompt phrase.

    Produces one "<topic> from a <position> perspective" string per
    (topic, position) pair, topics in the outer order.
    """
    combined = []
    for topic in topics:
        for position in positions:
            combined.append(f"{topic} from a {position} perspective")
    return combined
59
+
60
class DomainExpert(TextGeneration):
    """A customized task to generate text as a domain expert in the domain of farming and agriculture."""

    # Defaults come from the seed data; the pipeline presumably replaces
    # _template with the output of create_examples_template() at build time
    # -- TODO confirm against pipeline.py.
    _system_prompt: (str) = DEFAULT_SYSTEM_PROMPT
    _template: str = """{instruction}\nThis is the the instruction.\n Examples: """

    def format_input(self, input: Dict[str, Any]) -> "ChatType":
        """Render one input row as a two-message chat: the domain-expert
        system prompt followed by the instruction filled into the template."""
        return [
            {
                "role": "system",
                "content": self._system_prompt,
            },
            {
                "role": "user",
                # Template placeholders (e.g. {instruction}) are filled from
                # the row's keys; missing keys raise KeyError.
                "content": self._template.format(**input),
            },
        ]
77
+
78
+
79
class CleanNumberedList(Step):
    """A step to clean the numbered list of questions."""

    def process(self, inputs: StepInput) -> StepOutput:
        """Strip a leading "N. " enumeration prefix from every row's
        "question" field, then yield the cleaned batch exactly once.

        Bug fix: the original yielded `inputs` inside the loop, emitting the
        same (partially cleaned) batch once per row instead of once per batch.
        """
        import re

        # Compile once; matches e.g. "1. ", "23. " at the start of the string.
        pattern = re.compile(r"^\d+\.\s")

        for input in inputs:
            input["question"] = pattern.sub("", input["question"])
        yield inputs
hub.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
from tempfile import mktemp

import argilla as rg
from huggingface_hub import HfApi

from defaults import REMOTE_CODE_PATHS, SEED_DATA_PATH


# Single shared Hub client; tokens are passed per call below, so no token is
# bound to the client itself.
hf_api = HfApi()

# Boilerplate prepended to every generated dataset README (read once at
# import time; the file ships with the space).
with open("DATASET_README_BASE.md") as f:
    DATASET_README_BASE = f.read()
14
+
15
+
16
def create_readme(domain_seed_data, project_name, domain):
    """Render a dataset README for the project and write it to a temp file.

    The README starts with DATASET_README_BASE, then lists the project name,
    domain, and any perspectives/topics/examples present in the seed data.

    Args:
        domain_seed_data: dict with optional "perspectives", "topics",
            "examples" keys (examples are dicts with "question"/"answer").
        project_name: dataset/project name for the title.
        domain: domain label (e.g. "farming").

    Returns:
        Path (str) to a temporary file containing the rendered README.
        The caller is responsible for uploading/removing it.
    """
    import tempfile

    readme = DATASET_README_BASE
    readme += f"# {project_name}\n\n## Domain: {domain}"
    perspectives = domain_seed_data.get("perspectives")
    topics = domain_seed_data.get("topics")
    examples = domain_seed_data.get("examples")
    if perspectives:
        readme += "\n\n## Perspectives\n\n"
        for p in perspectives:
            readme += f"- {p}\n"
    if topics:
        readme += "\n\n## Topics\n\n"
        for t in topics:
            readme += f"- {t}\n"
    if examples:
        readme += "\n\n## Examples\n\n"
        for example in examples:
            readme += f"### {example['question']}\n\n{example['answer']}\n\n"
    # mktemp() is deprecated and race-prone (the returned name can be claimed
    # by another process before we open it). NamedTemporaryFile(delete=False)
    # creates the file atomically and leaves it on disk for the uploader.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".md", delete=False) as f:
        f.write(readme)
    return f.name
40
+
41
+
42
def setup_dataset_on_hub(repo_id, hub_token):
    """Ensure an (initially empty) dataset repository exists on the Hub.

    exist_ok=True makes the call idempotent, so re-running the app with the
    same project does not fail.
    """
    hf_api.create_repo(
        repo_id=repo_id, token=hub_token, repo_type="dataset", exist_ok=True
    )
50
+
51
+
52
def push_dataset_to_hub(
    domain_seed_data_path,
    project_name,
    domain,
    pipeline_path,
    hub_username,
    hub_token: str,
):
    """Create (if needed) the dataset repo and upload the seed data plus a
    generated README.

    Args:
        domain_seed_data_path: local path of the seed_data.json to upload.
        project_name: dataset name; combined with hub_username into repo_id.
        domain: domain label used in the generated README.
        pipeline_path: accepted for interface compatibility but unused here;
            the pipeline is uploaded separately by push_pipeline_to_hub.
        hub_username: Hub namespace for the repo.
        hub_token: Hub write token.
    """
    repo_id = f"{hub_username}/{project_name}"

    setup_dataset_on_hub(repo_id=repo_id, hub_token=hub_token)

    # upload the seed data to the hub
    hf_api.upload_file(
        path_or_fileobj=domain_seed_data_path,
        path_in_repo="seed_data.json",
        token=hub_token,
        repo_id=repo_id,
        repo_type="dataset",
    )

    # Parse the seed data for the README; `with` closes the file promptly
    # instead of leaking the handle as json.load(open(...)) did.
    with open(domain_seed_data_path) as f:
        domain_seed_data = json.load(f)

    # upload the generated README to the hub
    hf_api.upload_file(
        path_or_fileobj=create_readme(
            domain_seed_data=domain_seed_data, project_name=project_name, domain=domain
        ),
        path_in_repo="README.md",
        token=hub_token,
        repo_id=repo_id,
        repo_type="dataset",
    )
84
+
85
+
86
def push_pipeline_to_hub(
    pipeline_path,
    hub_username,
    hub_token: str,
    project_name,
):
    """Upload the serialized pipeline and the code it needs to the dataset repo.

    Pushes pipeline.yaml first, then each module listed in REMOTE_CODE_PATHS
    under its own name, so the dataset repo is self-contained for remote runs.
    """
    repo_id = f"{hub_username}/{project_name}"

    # (local path, path in repo) pairs: the pipeline config, then each module.
    uploads = [(pipeline_path, "pipeline.yaml")]
    uploads.extend((code_path, code_path) for code_path in REMOTE_CODE_PATHS)

    for local_path, remote_path in uploads:
        hf_api.upload_file(
            path_or_fileobj=local_path,
            path_in_repo=remote_path,
            token=hub_token,
            repo_id=repo_id,
            repo_type="dataset",
        )

    print(f"Dataset uploaded to {repo_id}")
113
+
114
+
115
def pull_seed_data_from_repo(repo_id, hub_token):
    """Download seed_data.json from the dataset repo and return it parsed.

    Bug fix: hf_hub_download stores the file in the local HF cache and
    returns that path -- the original ignored the return value and opened
    SEED_DATA_PATH relative to the cwd, reading a stale local copy (or
    failing) instead of the file it just downloaded. The handle is also
    closed via `with` rather than leaked.
    """
    local_path = hf_api.hf_hub_download(
        repo_id=repo_id, token=hub_token, repo_type="dataset", filename=SEED_DATA_PATH
    )
    with open(local_path) as f:
        return json.load(f)
121
+
122
+
123
def push_argilla_dataset_to_hub(
    name: str, repo_id: str, url: str, api_key: str, workspace: str = "admin"
):
    """Pull a reviewed FeedbackDataset out of Argilla and publish it on the Hub.

    Connects to the Argilla instance at `url`, snapshots the remote dataset
    locally, and pushes the snapshot to the `repo_id` dataset repo.
    """
    rg.init(api_url=url, api_key=api_key)
    remote_dataset = rg.FeedbackDataset.from_argilla(name=name, workspace=workspace)
    local_snapshot = remote_dataset.pull()
    local_snapshot.push_to_huggingface(repo_id=repo_id)
infer.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import requests

# Hugging Face Inference API token, read from the environment. If unset,
# requests are sent with "Bearer None" and will be rejected by the API.
HF_API_KEY = os.getenv("HF_API_KEY")
# Hosted serverless inference endpoint used for answer generation.
API_URL = (
    "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
)
headers = {"Authorization": f"Bearer {HF_API_KEY}"}
+
10
+
11
def query(question):
    """Send `question` to the hosted Mistral-7B-Instruct endpoint and return
    the generated text.

    Raises:
        requests.HTTPError: on a non-2xx response (auth/rate-limit/model
            loading), instead of the opaque KeyError the original raised
            when indexing the error JSON.
        requests.Timeout: if the endpoint does not answer within 60 seconds;
            without a timeout the Streamlit UI could hang indefinitely.
    """
    payload = {
        "inputs": question,
    }
    response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    response.raise_for_status()
    return response.json()[0]["generated_text"]
pages/2_πŸ‘©πŸΌβ€πŸ”¬ Describe Domain.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json

import streamlit as st

from hub import push_dataset_to_hub
from infer import query
from defaults import (
    DEFAULT_DOMAIN,
    DEFAULT_PERSPECTIVES,
    DEFAULT_TOPICS,
    DEFAULT_EXAMPLES,
    DEFAULT_SYSTEM_PROMPT,
    N_PERSPECTIVES,
    N_TOPICS,
    SEED_DATA_PATH,
    PIPELINE_PATH,
    PROJECT_NAME,
    DATASET_REPO_ID,
)
from utils import project_sidebar

st.set_page_config(
    page_title="Domain Data Grower",
    page_icon="πŸ§‘β€πŸŒΎ",
)
project_sidebar()

################################################################################
# HEADER
################################################################################

st.header("πŸ§‘β€πŸŒΎ Domain Data Grower")
st.divider()
st.subheader(
    "Step 2. Define the specific domain that you want to generate synthetic data for.",
)
st.write(
    "Define the project details, including the project name, domain, and API credentials"
)

################################################################################
# Domain Expert Section
################################################################################

(
    tab_domain_expert,
    tab_domain_perspectives,
    tab_domain_topics,
    tab_examples,
) = st.tabs(
    tabs=[
        "πŸ‘©πŸΌβ€πŸ”¬ Domain Expert",
        "πŸ” Domain Perspectives",
        "πŸ•ΈοΈ Domain Topics",
        "πŸ“š Examples",
    ]
)

with tab_domain_expert:
    st.text("Define the domain expertise that you want to train a language model")
    st.info(
        "A domain expert is a person who is an expert in a particular field or area. For example, a domain expert in farming would be someone who has extensive knowledge and experience in farming and agriculture."
    )

    domain = st.text_input("Domain Name", DEFAULT_DOMAIN)

    domain_expert_prompt = st.text_area(
        label="Domain Expert Definition",
        value=DEFAULT_SYSTEM_PROMPT,
        height=200,
    )

################################################################################
# Domain Perspectives
################################################################################

with tab_domain_perspectives:
    st.text("Define the different perspectives from which the domain can be viewed")
    st.info(
        """
        Perspectives are different viewpoints or angles from which a domain can be viewed.
        For example, the domain of farming can be viewed from the perspective of a commercial
        farmer or an independent family farmer."""
    )

    perspectives = st.session_state.get(
        "perspectives",
        [st.text_input("Domain Perspective 0", value=DEFAULT_PERSPECTIVES[0])],
    )

    if st.button("Add New Perspective"):
        n = len(perspectives)
        # Pre-fill from the seed defaults while any remain, then fall back to
        # an empty field. (Bug fix: the computed default was previously
        # discarded and value="" was always passed.)
        value = DEFAULT_PERSPECTIVES[n] if n < N_PERSPECTIVES else ""
        perspectives.append(st.text_input(f"Domain Perspective {n}", value=value))
    st.session_state["perspectives"] = perspectives


################################################################################
# Domain Topics
################################################################################

with tab_domain_topics:
    st.text("Define the main themes or subjects that are relevant to the domain")
    st.info(
        """Topics are the main themes or subjects that are relevant to the domain. For example, the domain of farming can have topics like soil health, crop rotation, or livestock management."""
    )
    topics = st.session_state.get(
        "topics", [st.text_input("Domain Topic 0", value=DEFAULT_TOPICS[0])]
    )
    new_topic = st.button("Add New Topic")

    if new_topic:
        n = len(topics)
        value = DEFAULT_TOPICS[n] if n < N_TOPICS else ""
        topics.append(st.text_input(f"Domain Topic {n}", value=value))
    st.session_state["topics"] = topics


################################################################################
# Examples Section
################################################################################

with tab_examples:
    st.text(
        "Add high-quality questions and answers that can be used to generate synthetic data"
    )
    st.info(
        """
        Examples are high-quality questions and answers that can be used to generate
        synthetic data for the domain. These examples will be used to train the language model
        to generate questions and answers.
        """
    )

    questions_answers = st.session_state.get(
        "questions_answers",
        [
            (
                st.text_area(
                    "Question", key="question_0", value=DEFAULT_EXAMPLES[0]["question"]
                ),
                st.text_area(
                    "Answer", key="answer_0", value=DEFAULT_EXAMPLES[0]["answer"]
                ),
            )
        ],
    )

    if st.button("Add New Example"):
        n = len(questions_answers)
        # Bug fix: the original unpacked DEFAULT_EXAMPLES[n].values(), which
        # depended on dict key order and raised IndexError once the user added
        # more examples than the seed list contains.
        if n < len(DEFAULT_EXAMPLES):
            default_question = DEFAULT_EXAMPLES[n]["question"]
            default_answer = DEFAULT_EXAMPLES[n]["answer"]
        else:
            default_question = ""
            default_answer = ""
        st.subheader(f"Example {n + 1}")
        if st.button("Generate New Answer", key=f"generate_{n}"):
            default_answer = query(default_question)
        _question = st.text_area(
            "Question", key=f"question_{n}", value=default_question
        )
        _answer = st.text_area("Answer", key=f"answer_{n}", value=default_answer)
        questions_answers.append((_question, _answer))
    st.session_state["questions_answers"] = questions_answers

################################################################################
# Setup Dataset on the Hub
################################################################################

st.divider()

hub_username = DATASET_REPO_ID.split("/")[0]
project_name = DATASET_REPO_ID.split("/")[1]
st.write("Define the dataset repo details on the Hub")
st.session_state["project_name"] = st.text_input("Project Name", project_name)
st.session_state["hub_username"] = st.text_input("Hub Username", hub_username)
st.session_state["hub_token"] = st.text_input("Hub Token", type="password", value=None)

if all(
    (
        st.session_state.get("project_name"),
        st.session_state.get("hub_username"),
        st.session_state.get("hub_token"),
    )
):
    st.success(f"Using the dataset repo {hub_username}/{project_name} on the Hub")


if st.button("πŸ€— Push Dataset Seed") and all(
    (
        domain,
        domain_expert_prompt,
        perspectives,
        topics,
        questions_answers,
    )
):
    if all(
        (
            st.session_state.get("project_name"),
            st.session_state.get("hub_username"),
            st.session_state.get("hub_token"),
        )
    ):
        project_name = st.session_state["project_name"]
        hub_username = st.session_state["hub_username"]
        hub_token = st.session_state["hub_token"]
    else:
        st.error(
            "Please create a dataset repo on the Hub before pushing the dataset seed"
        )
        st.stop()

    # Drop blank entries the user added but never filled in.
    perspectives = list(filter(None, perspectives))
    topics = list(filter(None, topics))
    examples = [{"question": q, "answer": a} for q, a in questions_answers]

    domain_data = {
        "domain": domain,
        "perspectives": perspectives,
        "topics": topics,
        "examples": examples,
        "domain_expert_prompt": domain_expert_prompt,
    }

    # Persist locally first; defaults.py reads this file on the next app start.
    with open(SEED_DATA_PATH, "w") as f:
        json.dump(domain_data, f, indent=2)

    push_dataset_to_hub(
        domain_seed_data_path=SEED_DATA_PATH,
        project_name=project_name,
        domain=domain,
        hub_username=hub_username,
        hub_token=hub_token,
        pipeline_path=PIPELINE_PATH,
    )

    st.sidebar.success(
        f"Dataset seed created and pushed to the Hub. Check it out [here](https://huggingface.co/datasets/{hub_username}/{project_name})"
    )
else:
    st.info(
        "Please fill in all the required domain fields to push the dataset seed to the Hub"
    )
pages/3_🌱 Generate Dataset.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st

# Bug fix: EntryNotFoundError is raised by huggingface_hub when a file is
# missing from a repo; the original imported it from streamlit.errors, which
# does not define it, so the except clause below could never work.
from huggingface_hub.utils import EntryNotFoundError

from hub import pull_seed_data_from_repo, push_pipeline_to_hub
from defaults import (
    DEFAULT_SYSTEM_PROMPT,
    PIPELINE_PATH,
    PROJECT_NAME,
    ARGILLA_SPACE_REPO_ID,
    DATASET_REPO_ID,
    ARGILLA_SPACE_NAME,
    ARGILLA_URL,
    PROJECT_SPACE_REPO_ID,
    HUB_USERNAME,
)
from utils import project_sidebar

from pipeline import serialize_pipeline, run_pipeline, create_pipelines_run_command

st.set_page_config(
    page_title="Domain Data Grower",
    page_icon="πŸ§‘β€πŸŒΎ",
)

project_sidebar()

################################################################################
# HEADER
################################################################################

st.header("πŸ§‘β€πŸŒΎ Domain Data Grower")
st.divider()
st.subheader("Step 3. Run the pipeline to generate synthetic data")
st.write(
    "Define the project details, including the project name, domain, and API credentials"
)


###############################################################
# CONFIGURATION
###############################################################

st.divider()

st.markdown("### Pipeline Configuration")

st.write("πŸ€— Hub details to pull the seed data")
hub_username = st.text_input("Hub Username", HUB_USERNAME)
project_name = st.text_input("Project Name", PROJECT_NAME)
repo_id = f"{hub_username}/{project_name}"
hub_token = st.text_input("Hub Token", type="password")

st.write("πŸ€– Inference configuration")

st.write(
    "Add the url of the Huggingface inference API or endpoint that your pipeline should use. You can find compatible models here:"
)
st.link_button(
    "πŸ€— Inference compatible models on the hub",
    "https://huggingface.co/models?pipeline_tag=text-generation&other=endpoints_compatible&sort=trending",
)

base_url = st.text_input("Base URL")

st.write("πŸ”¬ Argilla API details to push the generated dataset")
argilla_url = st.text_input("Argilla API URL", ARGILLA_URL)
argilla_api_key = st.text_input("Argilla API Key", "owner.apikey")
argilla_dataset_name = st.text_input("Argilla Dataset Name", project_name)
st.divider()

###############################################################
# LOCAL
###############################################################

st.markdown("### Run the pipeline")

st.write(
    "Once you've defined the pipeline configuration, you can run the pipeline locally or on this space."
)

st.write(
    """We recommend running the pipeline locally if you're planning on generating a large dataset. \
    But running the pipeline on this space is a handy way to get started quickly. Your synthetic
    samples will be pushed to Argilla and available for review.
    """
)
st.write(
    """If you're planning on running the pipeline on the space, be aware that it \
    will take some time to complete and you will need to maintain a \
    connection to the space."""
)


if st.button("πŸ’» Run pipeline locally", key="run_pipeline_local"):
    if all(
        [
            argilla_api_key,
            argilla_url,
            base_url,
            hub_username,
            project_name,
            hub_token,
            argilla_dataset_name,
        ]
    ):
        with st.spinner("Pulling seed data from the Hub..."):
            seed_data = pull_seed_data_from_repo(
                repo_id=f"{hub_username}/{project_name}",
                hub_token=hub_token,
            )

        domain = seed_data["domain"]
        perspectives = seed_data["perspectives"]
        topics = seed_data["topics"]
        examples = seed_data["examples"]
        domain_expert_prompt = seed_data["domain_expert_prompt"]

        with st.spinner("Serializing the pipeline configuration..."):
            serialize_pipeline(
                argilla_api_key=argilla_api_key,
                argilla_dataset_name=argilla_dataset_name,
                argilla_api_url=argilla_url,
                topics=topics,
                perspectives=perspectives,
                pipeline_config_path=PIPELINE_PATH,
                domain_expert_prompt=domain_expert_prompt or DEFAULT_SYSTEM_PROMPT,
                hub_token=hub_token,
                endpoint_base_url=base_url,
                examples=examples,
            )
            # Ship the config and code so the user can clone and run locally.
            push_pipeline_to_hub(
                pipeline_path=PIPELINE_PATH,
                hub_token=hub_token,
                hub_username=hub_username,
                project_name=project_name,
            )

        st.success(f"Pipeline configuration saved to {hub_username}/{project_name}")

        st.info(
            "To run the pipeline locally, you need to have the `distilabel` library installed. You can install it using the following command:"
        )
        st.text(
            "Execute the following command to generate a synthetic dataset from the seed data:"
        )
        command_to_run = create_pipelines_run_command(
            hub_token=hub_token,
            pipeline_config_path=PIPELINE_PATH,
            argilla_dataset_name=argilla_dataset_name,
        )
        st.code(
            f"""
            pip install git+https://github.com/argilla-io/distilabel.git
            git clone https://huggingface.co/{hub_username}/{project_name}
            cd {project_name}
            {' '.join(command_to_run[2:])}
            """,
            language="bash",
        )
    else:
        st.error("Please fill all the required fields.")

###############################################################
# SPACE
###############################################################

if st.button("πŸ”₯ Run pipeline right here, right now!"):
    if all(
        [
            argilla_api_key,
            argilla_url,
            base_url,
            hub_username,
            project_name,
            hub_token,
            argilla_dataset_name,
        ]
    ):
        with st.spinner("Pulling seed data from the Hub..."):
            try:
                seed_data = pull_seed_data_from_repo(
                    repo_id=f"{hub_username}/{project_name}",
                    hub_token=hub_token,
                )
            except EntryNotFoundError:
                st.error(
                    "Seed data not found. Please make sure you pushed the data seed in Step 2."
                )
                # Bug fix: without stopping here the code below raised a
                # NameError on seed_data and showed a confusing traceback.
                st.stop()

        domain = seed_data["domain"]
        perspectives = seed_data["perspectives"]
        topics = seed_data["topics"]
        examples = seed_data["examples"]
        domain_expert_prompt = seed_data["domain_expert_prompt"]

        with st.spinner("Serializing the pipeline configuration..."):
            serialize_pipeline(
                argilla_api_key=argilla_api_key,
                argilla_dataset_name=argilla_dataset_name,
                argilla_api_url=argilla_url,
                topics=topics,
                perspectives=perspectives,
                pipeline_config_path=PIPELINE_PATH,
                domain_expert_prompt=domain_expert_prompt or DEFAULT_SYSTEM_PROMPT,
                hub_token=hub_token,
                endpoint_base_url=base_url,
                examples=examples,
            )

        with st.spinner("Starting the pipeline..."):
            logs = run_pipeline(PIPELINE_PATH)

        st.success("Pipeline started successfully! πŸš€")

        with st.expander(label="View Logs", expanded=True):
            for out in logs:
                st.text(out)
    else:
        st.error("Please fill all the required fields.")
pages/4_πŸ” Review Generated Data.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st

from defaults import PROJECT_NAME, ARGILLA_URL, DATASET_REPO_ID
from utils import project_sidebar
from hub import push_argilla_dataset_to_hub

st.set_page_config(
    page_title="Domain Data Grower",
    page_icon="πŸ§‘β€πŸŒΎ",
)

project_sidebar()

################################################################################
# HEADER
################################################################################

st.header("πŸ§‘β€πŸŒΎ Domain Data Grower")
st.divider()

st.write(
    """Once you have reviewed the synthetic data in Argilla, you can publish the
    generated dataset to the Hub."""
)


################################################################################
# Configuration
################################################################################

st.divider()
st.write("πŸ”¬ Argilla API details to push the generated dataset")
# Defaults come from the project config; every field is editable so the page
# can target a different Argilla instance or a different dataset repo.
argilla_url = st.text_input("Argilla API URL", ARGILLA_URL)
argilla_api_key = st.text_input("Argilla API Key", "owner.apikey")
argilla_dataset_name = st.text_input("Argilla Dataset Name", PROJECT_NAME)
dataset_repo_id = st.text_input("Dataset Repo ID", DATASET_REPO_ID)
st.divider()

if st.button("πŸš€ Publish the generated dataset"):
    with st.spinner("Publishing the generated dataset..."):
        # Pulls the reviewed records out of Argilla and pushes them to the
        # Hub dataset repo (see hub.push_argilla_dataset_to_hub).
        push_argilla_dataset_to_hub(
            name=argilla_dataset_name,
            repo_id=dataset_repo_id,
            url=argilla_url,
            api_key=argilla_api_key,
            workspace="admin",
        )
    st.success("The generated dataset has been published to the Hub.")
pipeline.py ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import subprocess
3
+ import time
4
+ from typing import List
5
+
6
+ from distilabel.steps.generators.data import LoadDataFromDicts
7
+ from distilabel.steps.expand import ExpandColumns
8
+ from distilabel.steps.keep import KeepColumns
9
+ from distilabel.steps.tasks.self_instruct import SelfInstruct
10
+ from distilabel.steps.tasks.evol_instruct.base import EvolInstruct
11
+ from distilabel.llms.huggingface import InferenceEndpointsLLM
12
+ from distilabel.pipeline import Pipeline
13
+ from distilabel.steps import TextGenerationToArgilla
14
+ from dotenv import load_dotenv
15
+
16
+ from domain import (
17
+ DomainExpert,
18
+ CleanNumberedList,
19
+ create_topics,
20
+ create_examples_template,
21
+ APPLICATION_DESCRIPTION,
22
+ )
23
+
24
+ load_dotenv()
25
+
26
+
27
def define_pipeline(
    argilla_api_key: str,
    argilla_api_url: str,
    argilla_dataset_name: str,
    topics: List[str],
    perspectives: List[str],
    domain_expert_prompt: str,
    examples: List[dict],
    hub_token: str,
    endpoint_base_url: str,
):
    """Assemble the distilabel pipeline for the configured domain.

    Each (topic, perspective) pair becomes one seed row. SelfInstruct turns
    seeds into questions, EvolInstruct evolves them, a DomainExpert task
    answers the evolved questions, and the resulting question/answer pairs
    are pushed to an Argilla dataset for human review.
    """
    seed_terms = create_topics(topics, perspectives)
    examples_template = create_examples_template(examples)

    with Pipeline("farming") as pipeline:
        # One input row per seed term.
        load_data = LoadDataFromDicts(
            name="load_data",
            data=[{"input": seed_term} for seed_term in seed_terms],
            batch_size=64,
        )
        # A single Inference Endpoints client shared by every LLM task.
        shared_llm = InferenceEndpointsLLM(
            base_url=endpoint_base_url,
            api_key=hub_token,
        )
        self_instruct = SelfInstruct(
            name="self-instruct",
            application_description=APPLICATION_DESCRIPTION,
            num_instructions=5,
            input_batch_size=8,
            llm=shared_llm,
        )

        evol_instruction_complexity = EvolInstruct(
            name="evol_instruction_complexity",
            llm=shared_llm,
            num_evolutions=2,
            store_evolutions=True,
            input_batch_size=8,
            include_original_instruction=True,
            input_mappings={"instruction": "question"},
        )

        # SelfInstruct emits a list column; fan it out to one row per question.
        expand_instructions = ExpandColumns(
            name="expand_columns", columns={"instructions": "question"}
        )
        cleaner = CleanNumberedList(name="clean_numbered_list")
        # Same fan-out for the evolved instructions.
        expand_evolutions = ExpandColumns(
            name="expand_columns_evolved",
            columns={"evolved_instructions": "evolved_questions"},
        )

        domain_expert = DomainExpert(
            name="domain_expert",
            llm=shared_llm,
            input_batch_size=8,
            input_mappings={"instruction": "evolved_questions"},
            output_mappings={"generation": "domain_expert_answer"},
            _system_prompt=domain_expert_prompt,
            _template=examples_template,
        )

        keep_columns = KeepColumns(
            name="keep_columns",
            columns=["model_name", "evolved_questions", "domain_expert_answer"],
        )

        to_argilla = TextGenerationToArgilla(
            name="text_generation_to_argilla",
            dataset_name=argilla_dataset_name,
            dataset_workspace="admin",
            api_url=argilla_api_url,
            api_key=argilla_api_key,
            input_mappings={
                "instruction": "evolved_questions",
                "generation": "domain_expert_answer",
            },
        )

        # Wire the DAG:
        # seed -> self-instruct -> expand -> clean -> evolve
        #   -> expand evolutions -> expert answers -> prune columns -> Argilla
        load_data.connect(self_instruct)
        self_instruct.connect(expand_instructions)
        expand_instructions.connect(cleaner)
        cleaner.connect(evol_instruction_complexity)
        evol_instruction_complexity.connect(expand_evolutions)
        expand_evolutions.connect(domain_expert)
        domain_expert.connect(keep_columns)
        keep_columns.connect(to_argilla)

    return pipeline
115
+
116
+
117
def serialize_pipeline(
    argilla_api_key: str,
    argilla_api_url: str,
    argilla_dataset_name: str,
    topics: List[str],
    perspectives: List[str],
    domain_expert_prompt: str,
    hub_token: str,
    endpoint_base_url: str,
    pipeline_config_path: str = "pipeline.yaml",
    examples: "List[dict] | None" = None,
):
    """Serialize the domain pipeline to a YAML config file.

    Builds the pipeline with ``define_pipeline`` and writes it to
    ``pipeline_config_path`` (overwriting any existing file) so it can be
    executed later via ``python -m distilabel pipeline run --config ...``.

    ``examples`` defaults to ``None`` rather than a mutable ``[]`` (the
    shared-mutable-default pitfall); ``None`` is normalized to an empty
    list before the pipeline is built, so callers see no difference.
    """
    pipeline = define_pipeline(
        argilla_api_key=argilla_api_key,
        argilla_api_url=argilla_api_url,
        argilla_dataset_name=argilla_dataset_name,
        topics=topics,
        perspectives=perspectives,
        domain_expert_prompt=domain_expert_prompt,
        hub_token=hub_token,
        endpoint_base_url=endpoint_base_url,
        examples=examples if examples is not None else [],
    )
    pipeline.save(path=pipeline_config_path, overwrite=True, format="yaml")
142
+
143
+
144
def create_pipelines_run_command(
    pipeline_config_path: str = "pipeline.yaml",
    argilla_dataset_name: str = "domain_specific_datasets",
):
    """Build the CLI invocation that executes the serialized pipeline.

    Returns the argv list for ``subprocess``: it runs the ``distilabel``
    module against ``pipeline_config_path`` and overrides the Argilla
    dataset name via a ``--param`` runtime parameter.
    """
    dataset_param = f"text_generation_to_argilla.dataset_name={argilla_dataset_name}"
    return [
        "python",
        "-m",
        "distilabel",
        "pipeline",
        "run",
        "--config",
        pipeline_config_path,
        "--param",
        dataset_param,
    ]
161
+
162
+
163
def run_pipeline(
    pipeline_config_path: str = "pipeline.yaml",
    argilla_dataset_name: str = "domain_specific_datasets",
):
    """Run the serialized pipeline, yielding its output line by line.

    Spawns ``python -m distilabel pipeline run ...`` as a subprocess and
    streams its combined stdout/stderr as decoded strings, so the caller
    (the Streamlit UI) can display logs as they arrive.

    Fixes two issues in the original implementation:
    - ``stderr=subprocess.PIPE`` was never drained, which can deadlock the
      child once the OS pipe buffer fills; stderr is now merged into stdout.
    - A ``time.sleep(0.2)`` per line needlessly throttled log streaming;
      ``readline`` already blocks until data is available.
    """
    command_to_run = create_pipelines_run_command(
        pipeline_config_path=pipeline_config_path,
        argilla_dataset_name=argilla_dataset_name,
    )

    process = subprocess.Popen(
        command_to_run,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # merge so a full stderr pipe can't deadlock
    )

    # iter(..., b"") reads until EOF without busy-waiting.
    for raw_line in iter(process.stdout.readline, b""):
        yield raw_line.decode("utf-8")

    process.stdout.close()
    process.wait()  # reap the child to avoid a zombie process
pipeline.yaml ADDED
@@ -0,0 +1,546 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ distilabel:
2
+ version: 1.0.0
3
+ pipeline:
4
+ name: farming
5
+ description: null
6
+ steps:
7
+ - step:
8
+ name: load_data
9
+ input_mappings: {}
10
+ output_mappings: {}
11
+ batch_size: 64
12
+ data:
13
+ - input: animal welfare from a Family Farming perspective
14
+ - input: animal welfare from a Agribusiness perspective
15
+ - input: animal welfare from a Permaculture perspective
16
+ - input: animal welfare from a Agroforestery perspective
17
+ - input: animal welfare from a Conventional Farming perspective
18
+ - input: economic growth from a Family Farming perspective
19
+ - input: economic growth from a Agribusiness perspective
20
+ - input: economic growth from a Permaculture perspective
21
+ - input: economic growth from a Agroforestery perspective
22
+ - input: economic growth from a Conventional Farming perspective
23
+ - input: land from a Family Farming perspective
24
+ - input: land from a Agribusiness perspective
25
+ - input: land from a Permaculture perspective
26
+ - input: land from a Agroforestery perspective
27
+ - input: land from a Conventional Farming perspective
28
+ - input: resources from a Family Farming perspective
29
+ - input: resources from a Agribusiness perspective
30
+ - input: resources from a Permaculture perspective
31
+ - input: resources from a Agroforestery perspective
32
+ - input: resources from a Conventional Farming perspective
33
+ - input: efficiency from a Family Farming perspective
34
+ - input: efficiency from a Agribusiness perspective
35
+ - input: efficiency from a Permaculture perspective
36
+ - input: efficiency from a Agroforestery perspective
37
+ - input: efficiency from a Conventional Farming perspective
38
+ runtime_parameters_info:
39
+ - name: batch_size
40
+ optional: true
41
+ description: The number of rows that will contain the batches generated by
42
+ the step.
43
+ type_info:
44
+ module: distilabel.steps.generators.data
45
+ name: LoadDataFromDicts
46
+ name: load_data
47
+ - step:
48
+ name: self-instruct
49
+ input_mappings: {}
50
+ output_mappings: {}
51
+ input_batch_size: 8
52
+ llm:
53
+ generation_kwargs: {}
54
+ model_id: null
55
+ endpoint_name: null
56
+ endpoint_namespace: null
57
+ base_url: https://hh1rkuymnetmkw9m.eu-west-1.aws.endpoints.huggingface.cloud
58
+ tokenizer_id: null
59
+ model_display_name: null
60
+ use_openai_client: false
61
+ type_info:
62
+ module: distilabel.llms.huggingface.inference_endpoints
63
+ name: InferenceEndpointsLLM
64
+ group_generations: false
65
+ num_generations: 1
66
+ num_instructions: 5
67
+ criteria_for_query_generation: 'Incorporate a diverse range of verbs, avoiding
68
+ repetition.
69
+
70
+ Ensure queries are compatible with AI model''s text generation functions and
71
+ are limited to 1-2 sentences.
72
+
73
+ Design queries to be self-contained and standalone.
74
+
75
+ Blend interrogative (e.g., "What is the significance of x?") and imperative
76
+ (e.g., "Detail the process of x.") styles.'
77
+ application_description: 'You are an AI assistant than generates queries around
78
+ the domain of farming.
79
+
80
+ Your should not expect basic but profound questions from your users.
81
+
82
+ The queries should reflect a diversity of vision and economic positions and
83
+ political positions.
84
+
85
+ The queries may know about different methods of farming.
86
+
87
+ The queries can be positioned politically, economically, socially, or practically.
88
+
89
+ Also take into account the impact of diverse causes on diverse domains.'
90
+ runtime_parameters_info:
91
+ - name: input_batch_size
92
+ optional: true
93
+ description: The number of rows that will contain the batches processed by
94
+ the step.
95
+ - name: llm
96
+ runtime_parameters_info:
97
+ - name: generation_kwargs
98
+ description: The kwargs to be propagated to either `generate` or `agenerate`
99
+ methods within each `LLM`.
100
+ keys:
101
+ - name: max_new_tokens
102
+ optional: true
103
+ description: the maximum number of new tokens that the model will generate. Defaults
104
+ to `128`.
105
+ - name: frequency_penalty
106
+ optional: true
107
+ description: the repetition penalty to use for the generation. Defaults to
108
+ `0.0`. Only applies if `use_openai_client=True`.
109
+ - name: presence_penalty
110
+ optional: true
111
+ description: the presence penalty to use for the generation. Defaults
112
+ to `0.0`. Only applies if `use_openai_client=True`.
113
+ - name: repetition_penalty
114
+ optional: true
115
+ description: the repetition penalty to use for the generation. Defaults to
116
+ `None`. Only applies if `use_openai_client=False`.
117
+ - name: temperature
118
+ optional: true
119
+ description: the temperature to use for the generation. Defaults to `1.0`.
120
+ - name: do_sample
121
+ optional: true
122
+ description: whether to use sampling for the generation. Defaults to `False`. Only
123
+ applies if `use_openai_client=False`.
124
+ - name: top_k
125
+ optional: true
126
+ description: the top-k value to use for the generation. Defaults to `0.8`,
127
+ since neither `0.0` nor `1.0` are valid values in TGI.
128
+ - name: top_p
129
+ optional: true
130
+ description: the top-p value to use for the generation. Defaults to `1.0`.
131
+ - name: typical_p
132
+ optional: true
133
+ description: the typical-p value to use for the generation. Defaults to
134
+ `0.5`.
135
+ - name: endpoint_name
136
+ optional: true
137
+ description: The name of the Inference Endpoint to use for the LLM.
138
+ - name: endpoint_namespace
139
+ optional: true
140
+ description: The namespace of the Inference Endpoint to use for the LLM.
141
+ - name: base_url
142
+ optional: true
143
+ description: The base URL to use for the Inference Endpoints API requests.
144
+ - name: api_key
145
+ optional: true
146
+ description: The API key to authenticate the requests to the Inference Endpoints
147
+ API.
148
+ - name: num_generations
149
+ optional: true
150
+ description: The number of generations to be produced per input.
151
+ type_info:
152
+ module: distilabel.steps.tasks.self_instruct
153
+ name: SelfInstruct
154
+ name: self-instruct
155
+ - step:
156
+ name: evol_instruction_complexity
157
+ input_mappings:
158
+ instruction: question
159
+ output_mappings: {}
160
+ input_batch_size: 8
161
+ llm:
162
+ generation_kwargs: {}
163
+ model_id: null
164
+ endpoint_name: null
165
+ endpoint_namespace: null
166
+ base_url: https://hh1rkuymnetmkw9m.eu-west-1.aws.endpoints.huggingface.cloud
167
+ tokenizer_id: null
168
+ model_display_name: null
169
+ use_openai_client: false
170
+ type_info:
171
+ module: distilabel.llms.huggingface.inference_endpoints
172
+ name: InferenceEndpointsLLM
173
+ group_generations: false
174
+ num_generations: 1
175
+ num_evolutions: 2
176
+ store_evolutions: true
177
+ generate_answers: false
178
+ include_original_instruction: true
179
+ mutation_templates:
180
+ CONSTRAINTS: "I want you act as a Prompt Rewriter.\n\nYour objective is to\
181
+ \ rewrite a given prompt into a more complex version to make those famous\
182
+ \ AI systems (e.g., chatgpt and GPT4) a bit harder to handle.\n\nBut the\
183
+ \ rewritten prompt must be reasonable and must be understood and responded\
184
+ \ by humans.\n\nYour rewriting cannot omit the non-text parts such as the\
185
+ \ table and code in #The Given Prompt#:. Also, please do not omit the input\
186
+ \ in #The Given Prompt#.\n\nYou SHOULD complicate the given prompt using\
187
+ \ the following method: \nPlease add one more constraints/requirements into\
188
+ \ '#The Given Prompt#'\n\nYou should try your best not to make the #Rewritten\
189
+ \ Prompt# become verbose, #Rewritten Prompt# can only add 10 to 20 words\
190
+ \ into #The Given Prompt#.\n\n'#The Given Prompt#', '#Rewritten Prompt#',\
191
+ \ 'given prompt' and 'rewritten prompt' are not allowed to appear in #Rewritten\
192
+ \ Prompt#\n\n#The Given Prompt#:\n<PROMPT>\n#Rewritten Prompt#:\n\n"
193
+ DEEPENING: "I want you act as a Prompt Rewriter.\n\nYour objective is to rewrite\
194
+ \ a given prompt into a more complex version to make those famous AI systems\
195
+ \ (e.g., chatgpt and GPT4) a bit harder to handle.\n\nBut the rewritten\
196
+ \ prompt must be reasonable and must be understood and responded by humans.\n\
197
+ \nYour rewriting cannot omit the non-text parts such as the table and code\
198
+ \ in #The Given Prompt#:. Also, please do not omit the input in #The Given\
199
+ \ Prompt#.\n\nYou SHOULD complicate the given prompt using the following\
200
+ \ method: \nIf #The Given Prompt# contains inquiries about certain issues,\
201
+ \ the depth and breadth of the inquiry can be increased.\n\nYou should try\
202
+ \ your best not to make the #Rewritten Prompt# become verbose, #Rewritten\
203
+ \ Prompt# can only add 10 to 20 words into #The Given Prompt#.\n\n'#The\
204
+ \ Given Prompt#', '#Rewritten Prompt#', 'given prompt' and 'rewritten prompt'\
205
+ \ are not allowed to appear in #Rewritten Prompt#\n\n#The Given Prompt#:\n\
206
+ <PROMPT>\n#Rewritten Prompt#:\n\n"
207
+ CONCRETIZING: "I want you act as a Prompt Rewriter.\n\nYour objective is to\
208
+ \ rewrite a given prompt into a more complex version to make those famous\
209
+ \ AI systems (e.g., chatgpt and GPT4) a bit harder to handle.\n\nBut the\
210
+ \ rewritten prompt must be reasonable and must be understood and responded\
211
+ \ by humans.\n\nYour rewriting cannot omit the non-text parts such as the\
212
+ \ table and code in #The Given Prompt#:. Also, please do not omit the input\
213
+ \ in #The Given Prompt#.\n\nYou SHOULD complicate the given prompt using\
214
+ \ the following method: \nPlease replace general concepts with more specific\
215
+ \ concepts.\n\nYou should try your best not to make the #Rewritten Prompt#\
216
+ \ become verbose, #Rewritten Prompt# can only add 10 to 20 words into #The\
217
+ \ Given Prompt#.\n\n'#The Given Prompt#', '#Rewritten Prompt#', 'given prompt'\
218
+ \ and 'rewritten prompt' are not allowed to appear in #Rewritten Prompt#\n\
219
+ \n#The Given Prompt#:\n<PROMPT>\n#Rewritten Prompt#:\n\n"
220
+ INCREASED_REASONING_STEPS: "I want you act as a Prompt Rewriter.\n\nYour objective\
221
+ \ is to rewrite a given prompt into a more complex version to make those\
222
+ \ famous AI systems (e.g., chatgpt and GPT4) a bit harder to handle.\n\n\
223
+ But the rewritten prompt must be reasonable and must be understood and responded\
224
+ \ by humans.\n\nYour rewriting cannot omit the non-text parts such as the\
225
+ \ table and code in #The Given Prompt#:. Also, please do not omit the input\
226
+ \ in #The Given Prompt#.\n\nYou SHOULD complicate the given prompt using\
227
+ \ the following method: \nIf #The Given Prompt# can be solved with just\
228
+ \ a few simple thinking processes, you can rewrite it to explicitly request\
229
+ \ multiple-step reasoning.\n\nYou should try your best not to make the #Rewritten\
230
+ \ Prompt# become verbose, #Rewritten Prompt# can only add 10 to 20 words\
231
+ \ into #The Given Prompt#.\n\n'#The Given Prompt#', '#Rewritten Prompt#',\
232
+ \ 'given prompt' and 'rewritten prompt' are not allowed to appear in #Rewritten\
233
+ \ Prompt#\n\n#The Given Prompt#:\n<PROMPT>\n#Rewritten Prompt#:\n\n"
234
+ BREADTH: 'I want you act as a Prompt Creator.
235
+
236
+
237
+ Your goal is to draw inspiration from the #Given Prompt# to create a brand
238
+ new prompt.
239
+
240
+
241
+ This new prompt should belong to the same domain as the #Given Prompt# but
242
+ be even more rare.
243
+
244
+
245
+ The LENGTH and complexity of the #Created Prompt# should be similar to that
246
+ of the #Given Prompt#.
247
+
248
+
249
+ The #Created Prompt# must be reasonable and must be understood and responded
250
+ by humans.
251
+
252
+
253
+ ''#Given Prompt#'', ''#Created Prompt#'', ''given prompt'' and ''created
254
+ prompt'' are not allowed to appear in #Created Prompt#
255
+
256
+
257
+ #Given Prompt#:
258
+
259
+ <PROMPT>
260
+
261
+ #Created Prompt#:
262
+
263
+
264
+ '
265
+ seed: 42
266
+ runtime_parameters_info:
267
+ - name: input_batch_size
268
+ optional: true
269
+ description: The number of rows that will contain the batches processed by
270
+ the step.
271
+ - name: llm
272
+ runtime_parameters_info:
273
+ - name: generation_kwargs
274
+ description: The kwargs to be propagated to either `generate` or `agenerate`
275
+ methods within each `LLM`.
276
+ keys:
277
+ - name: max_new_tokens
278
+ optional: true
279
+ description: the maximum number of new tokens that the model will generate. Defaults
280
+ to `128`.
281
+ - name: frequency_penalty
282
+ optional: true
283
+ description: the repetition penalty to use for the generation. Defaults to
284
+ `0.0`. Only applies if `use_openai_client=True`.
285
+ - name: presence_penalty
286
+ optional: true
287
+ description: the presence penalty to use for the generation. Defaults
288
+ to `0.0`. Only applies if `use_openai_client=True`.
289
+ - name: repetition_penalty
290
+ optional: true
291
+ description: the repetition penalty to use for the generation. Defaults to
292
+ `None`. Only applies if `use_openai_client=False`.
293
+ - name: temperature
294
+ optional: true
295
+ description: the temperature to use for the generation. Defaults to `1.0`.
296
+ - name: do_sample
297
+ optional: true
298
+ description: whether to use sampling for the generation. Defaults to `False`. Only
299
+ applies if `use_openai_client=False`.
300
+ - name: top_k
301
+ optional: true
302
+ description: the top-k value to use for the generation. Defaults to `0.8`,
303
+ since neither `0.0` nor `1.0` are valid values in TGI.
304
+ - name: top_p
305
+ optional: true
306
+ description: the top-p value to use for the generation. Defaults to `1.0`.
307
+ - name: typical_p
308
+ optional: true
309
+ description: the typical-p value to use for the generation. Defaults to
310
+ `0.5`.
311
+ - name: endpoint_name
312
+ optional: true
313
+ description: The name of the Inference Endpoint to use for the LLM.
314
+ - name: endpoint_namespace
315
+ optional: true
316
+ description: The namespace of the Inference Endpoint to use for the LLM.
317
+ - name: base_url
318
+ optional: true
319
+ description: The base URL to use for the Inference Endpoints API requests.
320
+ - name: api_key
321
+ optional: true
322
+ description: The API key to authenticate the requests to the Inference Endpoints
323
+ API.
324
+ - name: num_generations
325
+ optional: true
326
+ description: The number of generations to be produced per input.
327
+ - name: seed
328
+ optional: true
329
+ description: As `numpy` is being used in order to randomly pick a mutation
330
+ method, then is nice to seed a random seed.
331
+ type_info:
332
+ module: distilabel.steps.tasks.evol_instruct.base
333
+ name: EvolInstruct
334
+ name: evol_instruction_complexity
335
+ - step:
336
+ name: expand_columns
337
+ input_mappings: {}
338
+ output_mappings: {}
339
+ input_batch_size: 50
340
+ columns:
341
+ instructions: question
342
+ runtime_parameters_info:
343
+ - name: input_batch_size
344
+ optional: true
345
+ description: The number of rows that will contain the batches processed by
346
+ the step.
347
+ type_info:
348
+ module: distilabel.steps.expand
349
+ name: ExpandColumns
350
+ name: expand_columns
351
+ - step:
352
+ name: clean_numbered_list
353
+ input_mappings: {}
354
+ output_mappings: {}
355
+ input_batch_size: 50
356
+ runtime_parameters_info:
357
+ - name: input_batch_size
358
+ optional: true
359
+ description: The number of rows that will contain the batches processed by
360
+ the step.
361
+ type_info:
362
+ module: domain
363
+ name: CleanNumberedList
364
+ name: clean_numbered_list
365
+ - step:
366
+ name: expand_columns_evolved
367
+ input_mappings: {}
368
+ output_mappings: {}
369
+ input_batch_size: 50
370
+ columns:
371
+ evolved_instructions: evolved_questions
372
+ runtime_parameters_info:
373
+ - name: input_batch_size
374
+ optional: true
375
+ description: The number of rows that will contain the batches processed by
376
+ the step.
377
+ type_info:
378
+ module: distilabel.steps.expand
379
+ name: ExpandColumns
380
+ name: expand_columns_evolved
381
+ - step:
382
+ name: domain_expert
383
+ input_mappings:
384
+ instruction: evolved_questions
385
+ output_mappings:
386
+ generation: domain_expert_answer
387
+ input_batch_size: 8
388
+ llm:
389
+ generation_kwargs: {}
390
+ model_id: null
391
+ endpoint_name: null
392
+ endpoint_namespace: null
393
+ base_url: https://hh1rkuymnetmkw9m.eu-west-1.aws.endpoints.huggingface.cloud
394
+ tokenizer_id: null
395
+ model_display_name: null
396
+ use_openai_client: false
397
+ type_info:
398
+ module: distilabel.llms.huggingface.inference_endpoints
399
+ name: InferenceEndpointsLLM
400
+ group_generations: false
401
+ num_generations: 1
402
+ runtime_parameters_info:
403
+ - name: input_batch_size
404
+ optional: true
405
+ description: The number of rows that will contain the batches processed by
406
+ the step.
407
+ - name: llm
408
+ runtime_parameters_info:
409
+ - name: generation_kwargs
410
+ description: The kwargs to be propagated to either `generate` or `agenerate`
411
+ methods within each `LLM`.
412
+ keys:
413
+ - name: max_new_tokens
414
+ optional: true
415
+ description: the maximum number of new tokens that the model will generate. Defaults
416
+ to `128`.
417
+ - name: frequency_penalty
418
+ optional: true
419
+ description: the repetition penalty to use for the generation. Defaults to
420
+ `0.0`. Only applies if `use_openai_client=True`.
421
+ - name: presence_penalty
422
+ optional: true
423
+ description: the presence penalty to use for the generation. Defaults
424
+ to `0.0`. Only applies if `use_openai_client=True`.
425
+ - name: repetition_penalty
426
+ optional: true
427
+ description: the repetition penalty to use for the generation. Defaults to
428
+ `None`. Only applies if `use_openai_client=False`.
429
+ - name: temperature
430
+ optional: true
431
+ description: the temperature to use for the generation. Defaults to `1.0`.
432
+ - name: do_sample
433
+ optional: true
434
+ description: whether to use sampling for the generation. Defaults to `False`. Only
435
+ applies if `use_openai_client=False`.
436
+ - name: top_k
437
+ optional: true
438
+ description: the top-k value to use for the generation. Defaults to `0.8`,
439
+ since neither `0.0` nor `1.0` are valid values in TGI.
440
+ - name: top_p
441
+ optional: true
442
+ description: the top-p value to use for the generation. Defaults to `1.0`.
443
+ - name: typical_p
444
+ optional: true
445
+ description: the typical-p value to use for the generation. Defaults to
446
+ `0.5`.
447
+ - name: endpoint_name
448
+ optional: true
449
+ description: The name of the Inference Endpoint to use for the LLM.
450
+ - name: endpoint_namespace
451
+ optional: true
452
+ description: The namespace of the Inference Endpoint to use for the LLM.
453
+ - name: base_url
454
+ optional: true
455
+ description: The base URL to use for the Inference Endpoints API requests.
456
+ - name: api_key
457
+ optional: true
458
+ description: The API key to authenticate the requests to the Inference Endpoints
459
+ API.
460
+ - name: num_generations
461
+ optional: true
462
+ description: The number of generations to be produced per input.
463
+ type_info:
464
+ module: domain
465
+ name: DomainExpert
466
+ name: domain_expert
467
+ - step:
468
+ name: keep_columns
469
+ input_mappings: {}
470
+ output_mappings: {}
471
+ input_batch_size: 50
472
+ columns:
473
+ - model_name
474
+ - evolved_questions
475
+ - domain_expert_answer
476
+ runtime_parameters_info:
477
+ - name: input_batch_size
478
+ optional: true
479
+ description: The number of rows that will contain the batches processed by
480
+ the step.
481
+ type_info:
482
+ module: distilabel.steps.keep
483
+ name: KeepColumns
484
+ name: keep_columns
485
+ - step:
486
+ name: text_generation_to_argilla
487
+ input_mappings:
488
+ instruction: evolved_questions
489
+ generation: domain_expert_answer
490
+ output_mappings: {}
491
+ input_batch_size: 50
492
+ dataset_name: farming
493
+ dataset_workspace: admin
494
+ api_url: https://argilla-farming.hf.space
495
+ runtime_parameters_info:
496
+ - name: input_batch_size
497
+ optional: true
498
+ description: The number of rows that will contain the batches processed by
499
+ the step.
500
+ - name: dataset_name
501
+ optional: false
502
+ description: The name of the dataset in Argilla.
503
+ - name: dataset_workspace
504
+ optional: true
505
+ description: The workspace where the dataset will be created in Argilla. Defaultsto
506
+ `None` which means it will be created in the default workspace.
507
+ - name: api_url
508
+ optional: true
509
+ description: The base URL to use for the Argilla API requests.
510
+ - name: api_key
511
+ optional: true
512
+ description: The API key to authenticate the requests to the Argilla API.
513
+ type_info:
514
+ module: distilabel.steps.argilla.text_generation
515
+ name: TextGenerationToArgilla
516
+ name: text_generation_to_argilla
517
+ connections:
518
+ - from: load_data
519
+ to:
520
+ - self-instruct
521
+ - from: self-instruct
522
+ to:
523
+ - expand_columns
524
+ - from: evol_instruction_complexity
525
+ to:
526
+ - expand_columns_evolved
527
+ - from: expand_columns
528
+ to:
529
+ - clean_numbered_list
530
+ - from: clean_numbered_list
531
+ to:
532
+ - evol_instruction_complexity
533
+ - from: expand_columns_evolved
534
+ to:
535
+ - domain_expert
536
+ - from: domain_expert
537
+ to:
538
+ - keep_columns
539
+ - from: keep_columns
540
+ to:
541
+ - text_generation_to_argilla
542
+ - from: text_generation_to_argilla
543
+ to: []
544
+ type_info:
545
+ module: distilabel.pipeline.local
546
+ name: Pipeline
project_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"project_name": "DEFAULT_DOMAIN", "argilla_space_repo_id": "burtenshaw/domain_test_4_argilla_space", "project_space_repo_id": "burtenshaw/domain_test_4_config_space", "dataset_repo_id": "burtenshaw/domain_test_4"}
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ datasets
2
+ python_dotenv
3
+ sentence_transformers
4
+ streamlit
5
+ huggingface_hub
6
+ mistralai
7
+ argilla
8
+ git+https://github.com/argilla-io/distilabel.git
seed_data.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "domain": "farming",
3
+ "perspectives": [
4
+ "Family Farming",
5
+ "Agribusiness",
6
+ "Permaculture",
7
+ "Agroforestery",
8
+ "Conventional Farming"
9
+ ],
10
+ "topics": [
11
+ "animal welfare",
12
+ "economic growth",
13
+ "land",
14
+ "resources",
15
+ "efficiency"
16
+ ],
17
+ "examples": [
18
+ {
19
+ "question": "Compare and contrast the environmental footprint of industrial and small-scale farming.",
20
+ "answer": "Regenerative agriculture practices aim to restore soil health through methods that increase soil organic matter, enhance microbial activity, and improve soil structure. These practices include no-till farming, cover cropping, diverse crop rotations, and integrated livestock management. According to LaCanne and Lundgren (2018), soil health improves due to increased biodiversity and organic matter, enhancing its water retention and nutrient efficiency. Moreover, Jones (2012) in \"Soil carbon & organic farming\" reports that these practices significantly elevate biodiversity, both above and below the soil surface, promoting resilient ecosystems and agroecological balances."
21
+ },
22
+ {
23
+ "question": "Compare the environmental footprint of small-scale, local farming versus large-scale, industrial agriculture.",
24
+ "answer": "Industrial agriculture typically emphasizes high-output, monoculture farming reliant on synthetic fertilizers and pesticides, which, as Horrigan, Lawrence, and Walker (2002) argue, leads to greater greenhouse gas emissions, higher energy use, and more water consumption compared to small-scale farming. In contrast, small-scale farms often employ diverse cropping systems and lower chemical inputs, resulting in a smaller environmental footprint. Pimentel et al. (2005) note that small-scale farms tend to have higher yields per unit area when environmental and sustainability factors are integrated into farming practices."
25
+ },
26
+ {
27
+ "question": "Analyze the economic implications of transitioning from conventional to organic farming.",
28
+ "answer": "Transitioning from conventional to organic farming involves significant changes in farm management, input use, and market engagement. Crowder and Reganold (2015) present evidence that organic farms often yield smaller outputs initially but achieve higher profitability due to premium prices, lower input costs, and improved soil health over time. However, this transition requires upfront investments in knowledge and infrastructure, which can be economically challenging for some farmers, as noted by Seufert and Ramankutty (2017)."
29
+ },
30
+ {
31
+ "question": "Analyze the social, economic and environnmental impacts of land consolidation vs small-scale farmers.",
32
+ "answer": "Land consolidation has been associated with increased agricultural productivity but also with negative social and environmental impacts. Larger land holdings typically lead to monocultures, which reduce biodiversity and increase vulnerability to pests and diseases, as highlighted by Li et al. (2017). Economically, while consolidation can lead to economies of scale and potential gains in gross margins, it often displaces rural populations, exacerbating poverty and reducing local food diversity (Sutherland et al., 2015)."
33
+ },
34
+ {
35
+ "question": "Investigate the relationship between land ownership patterns, agricultural productivity and environment sustainability. ",
36
+ "answer": "Land ownership patterns critically influence agricultural productivity and sustainability. Secure land tenure supports investments in long-term improvements such as soil conservation and water management, which are pivotal for sustainable outcomes. Studies by Barrett et al. (2010) demonstrate that fragmented land ownership often results in inefficient resource use and higher transaction costs, which can detract from sustainability goals."
37
+ }
38
+ ],
39
+ "domain_expert_prompt": "You will be asked about family farming and agribusiness related topics, from different perspectives.\n Your answer should be logical and supported by facts, don't fabricate arguments. \n Try to gather a diverse point of view taking into account current theories in agronomy, biology, economics, anthropology and ecology."
40
+ }
utils.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st

# Deduplicated: ARGILLA_SPACE_REPO_ID was imported twice in the original.
from defaults import (
    ARGILLA_SPACE_REPO_ID,
    PROJECT_NAME,
    ARGILLA_URL,
    DIBT_PARENT_APP_URL,
    DATASET_URL,
    DATASET_REPO_ID,
)


def project_sidebar():
    """Render the shared project sidebar used by every page of the app.

    Halts the page (``st.stop``) when the project has not been configured
    yet, otherwise shows the project blurb plus links to the dataset repo,
    the Argilla space, and the parent "new project" app.
    """
    # "DEFAULT_DOMAIN" is the placeholder value present before the parent
    # app writes a real project configuration (see project_config.json).
    if PROJECT_NAME == "DEFAULT_DOMAIN":
        st.warning(
            "Please set up the project configuration in the parent app before proceeding."
        )
        st.stop()

    st.sidebar.markdown(
        """
        ## 🌱 Domain Data Grower
        
        This space helps you create a dataset seed for building diverse domain-specific datasets for aligning models.
        """
    )
    st.sidebar.subheader(f"Project Details: {PROJECT_NAME}")
    # Plain strings here: the originals were f-strings with no placeholders.
    st.sidebar.link_button("📚 Dataset Repo", DATASET_URL)
    st.sidebar.link_button("🤖 Argilla Space", ARGILLA_URL)
    st.sidebar.divider()
    st.sidebar.link_button("🧑‍🌾 New Project", DIBT_PARENT_APP_URL)