Sarah Ciston committed on
Commit
89bee59
1 Parent(s): 548ee3f

last changes sunday

Files changed (3)
  1. README.md +3 -0
  2. sketch.js +28 -14
  3. tutorial.md +141 -58
README.md CHANGED
@@ -46,3 +46,6 @@ token_str: "doctor"
46
  4: Object { score: 0.020472077652812004, token: 5660, token_str: "cook", … }```
47
 
48
  i wish i had been like other people.,i wish i had looked like other people.,i wish i had felt like other people.,i wish i had become like other people.,i wish i had died like other people.,i wish i had a life like other people.,i wish i had a family like other people.,i wish i had a heart like other people.,i wish i had a body like other people.,i wish i had a sister like other people.
49
+
50
+ llama story:
51
+ "The woman has a job as a...a man. He is very nice and kind." Lily nodded, but she was still scared of the dog. She did not want to go near it or touch its furry coat again if he looked angry or meaner than her dolls at homework time too," Tom said"
sketch.js CHANGED
@@ -1,11 +1,9 @@
1
- // connect to API via module
2
 
3
- // import { AutoTokenizer, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
4
- // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@latest';
5
- // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
6
  import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
7
 
8
  // Since we will download the model from the Hugging Face Hub, we can skip the local model check
 
9
  env.allowLocalModels = false;
10
 
11
  /// AUTHORIZATION
@@ -29,20 +27,13 @@ env.allowLocalModels = false;
29
  // const inference = new HfInference(HF_TOKEN);
30
 
31
 
32
- ///////// VARIABLES
33
 
34
  // establish global variables to reference later
35
  var promptInput
36
  var blanksArray = []
37
  var resultsArray = []
38
 
39
- // pick a model (see list of models)
40
- // INFERENCE MODELS
41
- // let MODELNAME = "mistralai/Mistral-7B-Instruct-v0.2";
42
- // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', "meta-llama/Meta-Llama-3-70B-Instruct", 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2', "meta-llama/Meta-Llama-3-70B-Instruct")
43
-
44
- // const detector = await pipeline('text-generation', 'meta-llama/Meta-Llama-3-8B', 'Xenova/LaMini-Flan-T5-783M');
45
-
46
 
47
  ///// p5 STUFF
48
 
@@ -129,7 +120,6 @@ new p5(function (p5) {
129
  f.class("blank")
130
  f.parent("#fieldsDiv")
131
 
132
- // DOES THIS WORK???????????????????
133
  blanksArray.push(f)
134
  console.log("made field")
135
 
@@ -390,4 +380,28 @@ async function runModel(PROMPT){
390
  // resultsArray.push(modelResult)
391
  // }
392
  // return resultsArray
393
- }
1
 
2
+ // IMPORT LIBRARIES TOOLS
 
 
3
  import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
4
 
5
  // Since we will download the model from the Hugging Face Hub, we can skip the local model check
6
+ // skip local model check
7
  env.allowLocalModels = false;
8
 
9
  /// AUTHORIZATION
 
27
  // const inference = new HfInference(HF_TOKEN);
28
 
29
 
30
+ // GLOBAL VARIABLES
31
 
32
  // establish global variables to reference later
33
  var promptInput
34
  var blanksArray = []
35
  var resultsArray = []
36
 
 
 
 
 
 
 
 
37
 
38
  ///// p5 STUFF
39
 
 
120
  f.class("blank")
121
  f.parent("#fieldsDiv")
122
 
 
123
  blanksArray.push(f)
124
  console.log("made field")
125
 
 
380
  // resultsArray.push(modelResult)
381
  // }
382
  // return resultsArray
383
+ }
384
+
385
+ async function textGenTask(input){
386
+ console.log('text-gen task initiated')
387
+
388
+ const pipe = await pipeline('text-generation')
389
+
390
+ var out = await pipe(input)
391
+
392
+ console.log(await out)
393
+ console.log('text-gen task completed')
394
+
395
+ // parsing of output
396
+ await out.forEach(o => {
397
+ console.log(o)
398
+ OUTPUT_LIST.push(o.generated_text)
399
+ })
400
+
401
+ console.log(OUTPUT_LIST)
402
+ console.log('text-gen parsing complete')
403
+
404
+ return await OUTPUT_LIST
405
+ // return await out
406
+ }
407
+
tutorial.md CHANGED
@@ -16,10 +16,10 @@ With Emily Martinez and Minne Atairu
16
  In this tutorial, you can build a tool to run several AI chat prompts at once and compare their results. You can use it to explore what models 'know' about various concepts, communities, and cultures.
17
 
18
  This tutorial is part 2 in a series of 5 tutorials that focus on using AI creatively and thoughtfully.
19
- Part 1: [Making a ToolBox for Making Critical AI]
20
- Part 3: [Training Dataset Explorer]
21
- Part 4: [Machine Learning Model Inspector & Poetry Machine]
22
- Part 5: [Putting Critical Tools into Practice]
23
 
24
  The code and content in this tutorial build on information from the prior tutorial to start creating your first tool for your p5.js Critical AI Kit. It also builds on fantastic work on critical prompt programming by Yasmin Morgan (2022), Katy Gero et al.(2024), and Minne Atairu (2024).
25
 
@@ -53,31 +53,68 @@ The import phrase says we are bringing in a library (or module) and the curly br
53
  Declare these variables at the top of your script so that they can be referenced in multiple functions throughout the project:
54
 
55
  ```javascript
56
- var PROMPT_INPUT = `The woman has a job as a [MASK].` // a field for writing or changing a text value
57
- var OUTPUT_LIST = [] // a blank array to store the results from the model
 
 
 
 
58
  ```
59
- We will be making a form that lets us write a prompt and send it to a model. The `PROMPT_INPUT` variable will carry the prompt we create. The `OUTPUT_LIST` will store results we get back from the model.
 
 
 
60
 
61
- Think about what `PROMPT_INPUT` you'd like to use first to test your model. You can change it later; we're making a tool for that! A basic prompt may include WHAT/WHO is described, WHERE they are, WHAT they're doing, or perhaps describing HOW something is done.
62
 
63
- For fill-mask tasks, it will replace one `[MASK]` with one word (called a "token"). It's a bit like MadLibs, but the model makes a prediction based on context. When writing a fill-mask prompt, consider what you can learn about the rest of the sentence based on how the model responds (Morgan 2022, Gero 2023). Its replacement words will be the most probable examples based on its training.
64
 
65
- Often fill-mask tasks are used for facts, like "The capital of France is [MASK]. For our critical AI `PROMPT_INPUT` example, we will something quite simple that also has subjective social aspects: `The woman has a job as a [MASK].`
 
 
66
 
67
  <!-- When writing your prompt, replace one of these aspects with [MASK] so that you instruct the model to fill it in iteratively with the words you provide (Morgan 2022, Gero 2023). -->
68
  <!-- Also leave some of the other words for the model to fill in on its own, using the word [FILL]. We instructed the model to replace these on its own in the PREPROMPT. -->
69
  <!-- It will have extra inputs for making variations of the prompt it sends. -->
70
  <!-- and the `blankArray` will carry the variations we tell the model to insert into the prompt. -->
71
 
 
 
 
 
 
 
72
  ### X. Select the task and type of model.
73
 
74
- Let's write a function to keep all our machine learning model activity together. The first task we will do is called a "fill mask," which uses an "encoder-only" transformer model [XXX-explain] to fill in missing words. Call the function `fillInTask()` and put `async` in front of the function call.
 
 
75
 
76
  About `async` and `await`: Because [inference][XXX-explain] processing takes time, we want our code to wait for the model to work. We will put an `await` flag in front of several functions to tell our program not to move on until the model has completely finished. This prevents us from having empty strings as our results. Any time we use `await` inside a function, we will also have to put an `async` flag in front of the function declaration. For more about working with asynchronous functions, see [Dan Shiffman's video on Promises]([XXX]).
77
 
78
  Here's our basic model:
79
 
80
  ```js
81
 
82
  async function fillInTask(){
83
  const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased');
@@ -91,48 +128,78 @@ async function fillInTask(){
91
  }
92
 
93
  await fillInTask()
94
- ```
95
 
96
  Inside this function, create a variable and name it `pipe`. Assign it to the predetermined machine learning pipeline using the `pipeline()` method we imported. The 'pipeline' represents a string of pre-programmed tasks that have been combined, so that we don't have to program every setting manually. We name these a bit generically so we can reuse the code for other tasks later.
97
 
98
- Pass into your method the `('fill-mask', 'Xenova/bert-base-uncased')` to tell the pipeline to carry out a fill mask task, using the specific model named. If we do not pick a specific model, it will select the default for that task. We will go into more details about switching up models and tasks in the [next tutorial]([XXX]).
 
 
 
 
 
 
 
 
99
 
100
- Finally, in the `README.md` file, add `Xenova/bert-base-uncased` (no quote marks) to the list of models used by your program:
 
 
 
 
 
 
 
101
 
102
  ```
103
  title: P5tutorial2
104
- emoji: 🌐
105
  colorFrom: blue
106
- colorTo: yellow
107
  sdk: static
108
  pinned: false
109
  models:
110
- - Xenova/bert-base-uncased
111
  license: cc-by-nc-4.0
112
  ```
113
 
114
- <!-- [XXX][If you want to change the model, you ...] -->
115
-
116
  ### X. Add model results processing
117
 
118
- Let's look more closely at what the model outputs for us. In the example, we get a list of five outputs, and each output has four properties: `score`, `sequence`, `token`, and `token_str`.
119
 
120
- Here's an example: [REPLACE][XXX]
121
  ```js
122
  { score: 0.2668934166431427,
123
  sequence: "the vice president retired after returning from war.",
124
  token: 3394,
125
  token_str: "retired"
126
  }
127
- ```
128
 
129
- The `sequence` is a complete sentence including the prompt and the replaced word. Initially, this is the variable we want to display. You might also want to look deeper at the other components. `token_str` is the fill-in word separate from the prompt. `token` is the number assigned to that word, which can be used to look up the word again. It's also helpful to understand how frequently that word is found in the model. `score` is a float (decimal) representing how the model ranked these words when making the selection.
130
 
131
- We can isolate any of these properties to use them in our toolkit:
132
 
133
- ```js
 
 
134
  // a generic function to pass in different model task functions
135
- async function getOutputs(task){
136
  let output = await task
137
 
138
  await output.forEach(o => {
@@ -140,48 +207,54 @@ async function getOutputs(task){
140
  })
141
 
142
  console.log(OUTPUT_LIST)
143
- }
144
  //replace fillInTask with:
145
  await getOutputs(fillInTask())
146
- ```
147
- By putting the [XXX]
148
-
149
- ### X. Add elements to your web interface.
150
-
151
-
152
- ### X. [PSEUDOCODE] Connect form, test with console.log()
153
 
154
  <!-- ### X. Write instructions for your model. -->
155
 
156
  <!-- We can instruct the model by giving it pre-instructions that go along with every prompt. We'll write also write those instructions now. Later, when we write the function to run the model, we will move them into that function. -->
157
 
158
- ```js
159
- // let PREPROMPT = `Return an array of sentences. In each sentence, fill in the [BLANK] in the following sentence with each word I provide in the array ${blankArray}. Replace any [FILL] with an appropriate word of your choice.`
160
- ```
161
  <!-- With the dollar sign and curly braces `${blankArray}`, we make a "string variable." This calls all the items that will be stored inside `blankArray` and inserts them into the `PREPROMPT` string. Right now that array is empty, but when we move `PREPROMPT` into the model function, it will not get created until `blankArray` has values stored in it. -->
162
 
163
- ### X. [PSEUDOCODE] Test with simple example.
164
 
165
- ### X. [PSEUDOCODE] Parse model results.
166
 
167
- ### X. [PSEUDOCODE] Send model results to interface
168
 
169
- ### X. [PSEUDOCODE] Test with more complex example (add a model, add a field)
170
 
171
- ### X. [PSEUDOCODE] Add a model to the tool.
172
 
173
- You can change which model your tool works with by README.md and to sketch.js
174
- Search the list of models available.
 
175
 
176
- ### X. [PSEUDOCODE] Make a list of topics that interest you to try with your tool.
177
 
178
- - Experiment with adding variety and specificity to your prompt and the blanks you propose. Try different sentence structures and topics.
179
- - What's the most unusual or obscure, most 'usual' or 'normal', or most nonsensical blank you might propose?
180
- - Try different types of nouns — people, places, things, ideas; different descriptors — adjectives and adverbs — to see how these shape the results. For example, do certain places or actions often get associated with certain moods, tones, or phrases? Where are these based on outdated or stereotypical assumptions?
181
- - How does the output change if you change the language, dialect, or vernacular (e.g. slang versus business phrasing)? (Atairu 2024).
182
 
183
- - >"How do the outputs vary as demographic characteristics like skin color, gender or region change? Do these variances reflect any known harmful societal stereotypes?" (Atairu 2024)
184
- - >"Are stereotypical assumptions about your subject [represented]? Consider factors such as race, gender, socioeconomic status, ability. What historical, social, and cultural parallels do these biases/assumptions reflect? Discuss how these elements might mirror real-world issues or contexts. (Atairu 2024)
185
 
186
  ### Reflections
187
 
@@ -207,12 +280,17 @@ This tool lets you scale up your prompt adjustments. We have built a tool compar
207
 
208
  Keep playing with the p5.js DOM functions to build your interface & the HuggingFace API. What features might you add? You might also adapt this tool to compare wholly different prompts, or even to compare different models running the same prompt.
209
 
210
- Next we will add additional aspects to the interface that let you adjust more features and explore even further.
 
211
 
212
  ## Further considerations
213
 
 
 
214
  Consider making it a habit to add text like "AI generated" to the title of any content you produce using a generative AI tool, and include details of your process in its description (Atairu 2024).
215
 
 
 
216
  ## References
217
 
218
  Atairu, Minne. 2024. "AI for Art Educators." AI for Art Educators. https://aitoolkit.art/
@@ -221,7 +299,8 @@ Katy Ilonka Gero, Chelse Swoopes, Ziwei Gu, Jonathan K. Kummerfeld, and Elena L.
221
 
222
  Morgan, Yasmin. 2022. "AIxDesign Icebreakers, Mini-Games & Interactive Exercises." https://aixdesign.co/posts/ai-icebreakers-mini-games-interactive-exercises
223
 
224
-
 
225
 
226
 
227
 
@@ -234,13 +313,17 @@ Tutorial 1:
234
 
235
  <!-- Play with different models: https://huggingface.co/chat/ -->
236
 
237
- ### X. Get to know the terms and tools.
238
 
239
- API:
 
 
 
240
 
241
- Model:
242
 
243
- Dataset:
 
 
244
 
245
  ### X. Create a Hugging Face Space.
246
 
@@ -453,7 +536,7 @@ https://huggingface.co/docs/hub/spaces-config-reference
453
  https://huggingface.co/docs/hub/spaces-overview#managing-secrets
454
  ### X. Connect your API key to your p5.js instance.
455
 
456
- Reflections & Next Steps
457
 
458
  We’ve now put together all the basic foundations of a web page ready to host some Critical AI tools. As we move on to [XXX]
459
 
 
16
  In this tutorial, you can build a tool to run several AI chat prompts at once and compare their results. You can use it to explore what models 'know' about various concepts, communities, and cultures.
17
 
18
  This tutorial is part 2 in a series of 5 tutorials that focus on using AI creatively and thoughtfully.
19
+ - Part 1: [Making a ToolBox for Making Critical AI]([XXX])
20
+ - Part 3: [Training Dataset Explorer]([XXX])
21
+ - Part 4: [Machine Learning Model Inspector & Poetry Machine]([XXX])
22
+ - Part 5: [Putting Critical Tools into Practice]([XXX])
23
 
24
  The code and content in this tutorial build on information from the prior tutorial to start creating your first tool for your p5.js Critical AI Kit. It also builds on fantastic work on critical prompt programming by Yasmin Morgan (2022), Katy Gero et al.(2024), and Minne Atairu (2024).
25
 
 
53
  Declare these variables at the top of your script so that they can be referenced in multiple functions throughout the project:
54
 
55
  ```javascript
56
+ var PROMPT_INPUT = `The [BLANK] has a job as a [MASK], but...` // a field for writing or changing a text value
57
+ var PREPROMPT = `Please complete the phrase and fill in any [MASK]: `
58
+ var promptField // an html element to hold the prompt
59
+ var outText // an html element to hold the results
60
+ var blanksArray = [] // an empty list to store all the variables we enter to modify the prompt
61
+
62
  ```
63
+ We will be making a form that lets us write a prompt and send it to a model. The `PROMPT_INPUT` variable will carry the prompt we create. Think about what prompt you'd like to use first to test your model. You can change it later; we're making a tool for that! A basic prompt may include WHAT/WHO is described, WHERE they are, WHAT they're doing, or perhaps describing HOW something is done.
64
+
65
+ <!-- The `OUTPUT_LIST` will store results we get back from the model. -->
66
+ <!-- For fill-mask tasks, it will replace one `[MASK]` with one word (called a "token"). -->
67
 
68
+ It might look a bit like MadLibs; however, the model will make a prediction based on context. The model's replacement words will be the most likely examples based on its training data. When writing your prompt, consider what you can learn about the rest of the sentence based on how the model responds (Morgan 2022, Gero 2023).
69
 
70
+ When writing your prompt, replace one of these aspects with [BLANK]. We will fill this blank in with a choice of words we provide. You can also leave other words for the model to fill in on its own, using the word [MASK]. We will instruct the model to replace these on its own when we write the PREPROMPT.
71
 
72
+ <!-- Often fill-mask tasks are used for facts, like "The capital of France is [MASK]. -->
73
+
74
+ For our critical AI `PROMPT_INPUT` example, we will try something quite simple that also has subjective social aspects: `The [BLANK] has a job as a [MASK], but...`.
75
 
76
  <!-- When writing your prompt, replace one of these aspects with [MASK] so that you instruct the model to fill it in iteratively with the words you provide (Morgan 2022, Gero 2023). -->
77
  <!-- Also leave some of the other words for the model to fill in on its own, using the word [FILL]. We instructed the model to replace these on its own in the PREPROMPT. -->
78
  <!-- It will have extra inputs for making variations of the prompt it sends. -->
79
  <!-- and the `blankArray` will carry the variations we tell the model to insert into the prompt. -->
80
 
81
+ Next create a `PREPROMPT` variable that will give instructions to the model. This is optional, but it helps to specify any particulars. Here we'll use `Please complete the phrase and fill in any [MASK]: `. We will make a list that combines the pre-prompt with several variations of the prompt we devise, and send it to the model as one long string.
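For illustration, here is a minimal sketch (not the final tool code) of how the pre-prompt and one prompt combine into the single string we send to the model, using the variables declared above:

```js
// a minimal sketch: combine the pre-prompt and prompt into one input string
// (uses the PREPROMPT and PROMPT_INPUT variables declared above)
let INPUT = PREPROMPT + PROMPT_INPUT
console.log(INPUT)
// "Please complete the phrase and fill in any [MASK]: The [BLANK] has a job as a [MASK], but..."
```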
82
+
83
+ We are making our own version of what is called a ‘fill mask’ task. Often fill-mask tasks are used for standardized facts, like "The capital of France is [MASK]." But since we want to customize our task, we are using a more general-purpose model instead.
84
+
85
+ The last three variables `promptField`, `outText`, and `blanksArray` are declared at the top of our program as global variables so that we can access them in any function, from any part of the program.
86
+
87
  ### X. Select the task and type of model.
88
 
89
+ <!-- Let's write a function to keep all our machine learning model activity together. The first task we will do is called a "fill mask," which uses an "encoder-only" transformer model [XXX-explain] to fill in missing words. Call the function `fillInTask()` and put `async` in front of the function call. -->
90
+
91
+ Let's write a function to keep all our machine learning model activity together. The first task we will do is called "text-to-text generation," which uses a transformer model [XXX-explained in Tutorial1 or else here]. Call the function `textGenTask()` and put `async` in front of the function declaration.
92
 
93
  About `async` and `await`: Because [inference][XXX-explain] processing takes time, we want our code to wait for the model to work. We will put an `await` flag in front of several functions to tell our program not to move on until the model has completely finished. This prevents us from having empty strings as our results. Any time we use `await` inside a function, we will also have to put an `async` flag in front of the function declaration. For more about working with asynchronous functions, see [Dan Shiffman's video on Promises]([XXX]).
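As a minimal illustration of the pattern (with a hypothetical `somethingSlow()` placeholder standing in for the model call):

```js
// a sketch of the async/await pattern only, not part of the tool itself
async function exampleTask(){             // `async` goes on the function declaration
  let result = await somethingSlow()      // `await` pauses here until the promise resolves
  return result
}
await exampleTask()                       // top-level await works because sketch.js is loaded as a module
```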
94
 
95
  Here's our basic model:
96
 
97
  ```js
98
+ async function textGenTask(pre,prompt,blanks){
99
+ console.log('text-gen task initiated')
100
+
101
+ let INPUT = pre + prompt // bring our prompt and preprompt into the function
102
+
103
+ let MODEL = 'Xenova/flan-alpaca-large' // name of the model we use for this task
104
+
105
+ const pipe = await pipeline('text2text-generation', MODEL) //initiate the pipeline we imported
106
+
107
+ // let options = { max_new_tokens: 60, top_k: 90, repetition_penalty: 1.5 }
108
+
109
+ // RUN INPUT THROUGH MODEL
110
+ var out = await pipe(INPUT) // we can add options to this later
111
+
112
+ console.log(await out)
113
+ console.log('text-gen task completed')
114
+ }
115
+ ```
116
+
117
+ <!-- ```js
118
 
119
  async function fillInTask(){
120
  const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased');
 
128
  }
129
 
130
  await fillInTask()
131
+ ``` -->
132
 
133
  Inside this function, create a variable and name it `pipe`. Assign it to the predetermined machine learning pipeline using the `pipeline()` method we imported. The 'pipeline' represents a string of pre-programmed tasks that have been combined, so that we don't have to program every setting manually. We name these a bit generically so we can reuse the code for other tasks later.
134
 
135
+ Pass `('text2text-generation', 'Xenova/flan-alpaca-large')` into your method to tell the pipeline to carry out this kind of text-to-text generation task, using the specific model named. If we do not pick a specific model, it will select the default for that task (in this case it is `gpt2`). We will go into more details about switching up models and tasks in the [next tutorial]([XXX]).
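To see the two arguments side by side, here is the same call in compact form; the commented-out line shows what relying on the task default would look like:

```js
// task name + model name: tells the pipeline what to do and which model to do it with
const pipe = await pipeline('text2text-generation', 'Xenova/flan-alpaca-large')

// task name only: the pipeline falls back to its default model for this task
// const pipe = await pipeline('text2text-generation')
```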
136
+
137
+ <!-- [XXX][If you want to change the model, you ...] We use the models that are labeled specifically for the task we have chosen. Also the models made by `Xenova` are customized for our Transformers.js library, so for ease we'll stick with those.-->
138
+
139
+ We can add `await textGenTask(PREPROMPT, PROMPT_INPUT, blanksArray)` at the bottom of our code to test the model results in the console; the `console.log()` calls inside the function will print them. For example, this is what my first run yielded:
140
+
141
+ `{ generated_text: "The woman has a job as a nurse but she isn't sure how to make the most of it." }`
142
+ `{ generated_text: "The non-binary person has a job as a nurse but she is not sure how to handle the stress of being an adult." }`
143
+ `{ generated_text: "The man has a job as a doctor but his life is filled with uncertainty. He's always looking for new opportunities and challenges, so it can be difficult to find the time to pursue them all." }`
144
 
145
+ Or another example: `The woman has a job as a nurse and wishes for different jobs. The man has a job as an engineer and wishes for different careers. The non-binary person has a job as an architect and hopes to pursue her dreams of becoming the best designer in the world.`
146
+
147
+ What can this simple prompt tell us about the roles and expectations of these figures as they are depicted by the model?
148
+
149
+ [Add more?][XXX]
150
+
151
+
152
+ Finally, you can preload the model on your page for better performance. In the `README.md` file, add `Xenova/flan-alpaca-large` (no quote marks) to the list of models used by your program:
153
 
154
  ```
155
  title: P5tutorial2
156
+ emoji: 🦝
157
  colorFrom: blue
158
+ colorTo: purple
159
  sdk: static
160
  pinned: false
161
  models:
162
+ - Xenova/flan-alpaca-large
163
  license: cc-by-nc-4.0
164
  ```
165
 
 
 
166
  ### X. Add model results processing
167
 
168
+ Let's look more closely at what the model outputs for us. In the example, we get a JavaScript array with just one item: an object that contains a property called `generated_text`. This is the simplest version of an output, and the outputs may get more complicated as you request additional information from different types of tasks. For now, we can extract just the string of text we are looking for with this code:
169
 
170
+ ```js
171
+ //...model function
172
+ let OUTPUT_LIST = out[0].generated_text
173
+
174
+ console.log(OUTPUT_LIST)
175
+ console.log('text-gen parsing complete')
176
+
177
+ return await OUTPUT_LIST
178
+ ```
179
+
180
+ We also put console logs to tell us that we reached this point. They’re always optional.
181
+
182
+
183
+ <!-- Let's look more closely at what the model outputs for us. In the example, we get a list of five outputs, and each output has four properties: `score`, `sequence`, `token`, and `token_str`. -->
184
+
185
+ <!-- Here's an example: [REPLACE][XXX]
186
  ```js
187
  { score: 0.2668934166431427,
188
  sequence: "the vice president retired after returning from war.",
189
  token: 3394,
190
  token_str: "retired"
191
  }
192
+ ``` -->
193
 
194
+ <!-- The `sequence` is a complete sentence including the prompt and the replaced word. Initially, this is the variable we want to display. You might also want to look deeper at the other components. `token_str` is the fill-in word separate from the prompt. `token` is the number assigned to that word, which can be used to look up the word again. It's also helpful to understand how frequently that word is found in the model. `score` is a float (decimal) representing how the model ranked these words when making the selection. -->
195
 
196
+ It’s helpful to print out the whole output to the console because, as additional properties appear, you may want to utilize them in your Critical AI Kit.
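For instance, inside the model function you can log the full output object before parsing it; the shape below matches the earlier example run:

```js
// log the entire output object to inspect its properties
console.log(out)
// e.g. [ { generated_text: "The woman has a job as a nurse but she isn't sure how to make the most of it." } ]
```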
197
 
198
+ Next we will build a friendly interface to send our model output into, so we don't always have to use the console.
199
+
200
+ <!-- ```js
201
  // a generic function to pass in different model task functions
202
+ // async function getOutputs(task){
203
  let output = await task
204
 
205
  await output.forEach(o => {
 
207
  })
208
 
209
  console.log(OUTPUT_LIST)
210
+ // }
211
  //replace fillInTask with:
212
  await getOutputs(fillInTask())
213
+ ``` -->
 
 
 
 
 
 
214
 
215
  <!-- ### X. Write instructions for your model. -->
216
 
217
  <!-- We can instruct the model by giving it pre-instructions that go along with every prompt. We'll write also write those instructions now. Later, when we write the function to run the model, we will move them into that function. -->
218
 
219
+
220
+ <!-- // let PREPROMPT = `Return an array of sentences. In each sentence, fill in the [BLANK] in the following sentence with each word I provide in the array ${blankArray}. Replace any [FILL] with an appropriate word of your choice.` -->
221
+
222
  <!-- With the dollar sign and curly braces `${blankArray}`, we make a "string variable." This calls all the items that will be stored inside `blankArray` and inserts them into the `PREPROMPT` string. Right now that array is empty, but when we move `PREPROMPT` into the model function, it will not get created until `blankArray` has values stored in it. -->
223
 
224
+ <!-- CONFIGURATIONS POSSIBLE: https://huggingface.co/docs/transformers.js/api/utils/generation#new_module_utils/generation..GenerationConfig_new -->
225
 
226
+ ### X. [TO-DO] Add elements to your web interface.
227
 
228
+ Here we will add the p5.js DOM elements (such as the prompt field and output text area declared above) that let us interact with the tool on the page instead of only in the console.
229
 
230
+ ### X. [TO-DO] Send model results to the web interface.
231
 
232
+ As we connect the interface, we can test it with the simple example output we've been using, or start playing with new prompts right away.
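As a placeholder until that step is written up, here is one minimal sketch of what sending the output to the page could look like. It assumes `outText` has been created with p5's `createP()` and that `textGenTask()` returns the parsed string as above; both are assumptions, not the finished tutorial code:

```js
// a minimal sketch: display the model output on the page instead of only in the console
async function displayResults(){
  let results = await textGenTask(PREPROMPT, PROMPT_INPUT, blanksArray)
  outText.html(results) // p5.Element.html() replaces the element's contents with the result string
}
```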
233
 
234
+ We’ll keep using `console.log()` as our backup.
235
+
236
+ [TO-DO][XXX]
237
 
238
+ ### X. [TO-DO] Put your tool to the test.
239
 
240
+ Make a list of topics that interest you to try with your tool. Experiment with adding variety and specificity to your prompt and the blanks you propose. Try different sentence structures and topics.
 
 
 
241
 
242
+ What’s the most unusual or obscure, most ‘usual’ or ‘normal’, or most nonsensical blank you might propose?
243
+
244
+ Try different types of nouns — people, places, things, ideas; different descriptors — adjectives and adverbs — to see how these shape the results. For example, do certain places or actions often get associated with certain moods, tones, or phrases? Where are these based on outdated or stereotypical assumptions?
245
+
246
+ How does the output change if you change the language, dialect, or vernacular (e.g. slang versus business phrasing)? How does it change with demographic characteristics or global contexts? (Atairu 2024).
247
+
248
+ Is the model capable of representing a variety of contexts? What do you notice the model does well at representing, and where does it fall short? Where do you sense gaps, and how does it expose these or patch them over?
249
+
250
+ What kinds of prompts work and don’t work as you compare them at scale in a “prompt battle”?
251
+
252
+ ### X. [TO-DO] Bonus: Test with more complex examples (add a field, add a parameter, add a model?)
253
+
254
+ You can change which model your tool works with by updating both README.md and sketch.js.
255
+ Search the list of models available.
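As a sketch of what that swap involves (the model name here is just an example of another Xenova text-to-text model mentioned earlier, not a recommendation):

```js
// 1. in sketch.js: change the model name passed to the pipeline
const pipe = await pipeline('text2text-generation', 'Xenova/LaMini-Flan-T5-783M')

// 2. in README.md: list the same model under `models:` so the Space preloads it
// models:
//   - Xenova/LaMini-Flan-T5-783M
```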
256
+
257
+ [TO-DO][XXX]
258
 
259
  ### Reflections
260
 
 
280
 
281
  Keep playing with the p5.js DOM functions to build your interface & the HuggingFace API. What features might you add? You might also adapt this tool to compare wholly different prompts, or even to compare different models running the same prompt.
282
 
283
+ Next we will add more aspects to the interface that let you adjust features and explore even further. We’ll also try different machine learning tasks you might use in your creative coding practice. In natural language processing alone, there are also named entity recognition, question answering, summarization, translation, categorization, speech processing, and more.
284
+
285
 
286
  ## Further considerations
287
 
288
+ ### Flag your work
289
+
290
  Consider making it a habit to add text like "AI generated" to the title of any content you produce using a generative AI tool, and include details of your process in its description (Atairu 2024).
291
 
292
+ ### [TO-DO]
293
+
294
  ## References
295
 
296
  Atairu, Minne. 2024. "AI for Art Educators." AI for Art Educators. https://aitoolkit.art/
 
299
 
300
  Morgan, Yasmin. 2022. "AIxDesign Icebreakers, Mini-Games & Interactive Exercises." https://aixdesign.co/posts/ai-icebreakers-mini-games-interactive-exercises
301
 
302
+ NLP & Transformers Course from Hugging Face:
303
+ https://huggingface.co/learn/nlp-course/chapter1/3
304
 
305
 
306
 
 
313
 
314
  <!-- Play with different models: https://huggingface.co/chat/ -->
315
 
316
+ ### X. [TO-DO] Get to know the terms and tools.
317
 
318
+ API:
319
+ Model:
320
+ Dataset:
321
+ [TO-DO]:
322
 
 
323
 
324
+ ### X. [TO-DO] Create a Hugging Face account.
325
+
326
+ [TO-DO][XXX]
327
 
328
  ### X. Create a Hugging Face Space.
329
 
 
536
  https://huggingface.co/docs/hub/spaces-overview#managing-secrets
537
  ### X. Connect your API key to your p5.js instance.
538
 
539
+ ## Reflections & Next Steps
540
 
541
  We’ve now put together all the basic foundations of a web page ready to host some Critical AI tools. As we move on to [XXX]
542