Sarah Ciston committed
Commit 87f1324 · 1 Parent(s): 44e71d7
try different model

Browse files:
- README.md (+2 -2)
- sketch.js (+24 -101)
- tutorial.mdx (+35 -18)
README.md
CHANGED

```diff
@@ -6,13 +6,13 @@ colorTo: blue
 sdk: static
 pinned: false
 models:
-- openai-community/gpt2
+# - openai-community/gpt2
 # - meta-llama/Meta-Llama-3-70B-Instruct
 # - Xenova/detr-resnet-50
 # - Xenova/gpt2
 # - Xenova/bloom-560m
 # - Xenova/distilgpt2
-
+- Xenova/gpt-3.5-turbo
 # - Xenova/llama-68m
 # - Xenova/LaMini-Flan-T5-783M
 # - mistralai/Mistral-7B-Instruct-v0.2
```
sketch.js
CHANGED

```diff
@@ -171,21 +171,29 @@ new p5(function (p5) {
 
 ///// MODEL STUFF
 
-//
-
-// var PROMPT = promptInput.value()
-
-async function runModel(PREPROMPT, PROMPT){
-// // Chat completion API
+async function runModel(PREPROMPT, PROMPT){
+// // Chat completion API
+
+// pipeline/transformers version TEST
+let pipe = await pipeline('text-generation', 'Xenova/gpt-3.5-turbo');
+
+// , 'meta-llama/Meta-Llama-3-70B-Instruct'
+// , 'openai-community/gpt2'
+
+out = await pipe((PREPROMPT, PROMPT), {
+max_tokens: 150,
+return_full_text: false
+})
+
+console.log(out)
+
+var modelResult = await out.generated_text
+console.log(modelResult)
+
+return modelResult
+
+}
 
 // inference API version, not working in spaces
 // const out = await inference.chatCompletion({
@@ -204,27 +212,8 @@ async function runModel(PREPROMPT, PROMPT){
 
 // return modelResult
 
-// pipeline/transformers version TEST
-let pipe = await pipeline('text-generation', 'openai-community/gpt2');
-
-// , 'meta-llama/Meta-Llama-3-70B-Instruct'
-// , 'openai-community/gpt2'
-
-out = await pipe((PREPROMPT, PROMPT), {
-max_tokens: 150,
-num_return_sequences: 2,
-return_full_text: false
-})
-
-console.log(out)
-
-var modelResult = await out.generated_text
-console.log(modelResult)
-
-return modelResult
-
-}
-
 //inference.fill_mask({
 // let out = await pipe(PREPROMPT + PROMPT)
 // let out = await pipe(PREPROMPT + PROMPT, {
@@ -239,79 +228,13 @@ async function runModel(PREPROMPT, PROMPT){
 
 
 
-//
-//
-//
-// const imageContainer = document.getElementById('container');
-// const example = document.getElementById('example');
-
-// const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg';
-
-// Create a new object detection pipeline
-// status.textContent = 'Loading model...';
-// const detector = await pipeline('object-detection', 'Xenova/detr-resnet-50');
-
-// status.textContent = 'Ready';
-
-// example.addEventListener('click', (e) => {
-// e.preventDefault();
-// detect(EXAMPLE_URL);
-// });
-
-// fileUpload.addEventListener('change', function (e) {
-// const file = e.target.files[0];
-// if (!file) {
-// return;
-// }
-
-// const reader = new FileReader();
-
-// // Set up a callback when the file is loaded
-// reader.onload = e2 => detect(e2.target.result);
-
-// reader.readAsDataURL(file);
-// });
-
-// // Detect objects in the image
-// async function detect(img) {
-// imageContainer.innerHTML = '';
-// imageContainer.style.backgroundImage = `url(${img})`;
-
-// status.textContent = 'Analysing...';
-// const output = await detector(img, {
-// threshold: 0.5,
-// percentage: true,
-// });
-// status.textContent = '';
-// output.forEach(renderBox);
-// }
-
-// // Render a bounding box and label on the image
-// function renderBox({ box, label }) {
-// const { xmax, xmin, ymax, ymin } = box;
-
-// // Generate a random color for the box
-// const color = '#' + Math.floor(Math.random() * 0xFFFFFF).toString(16).padStart(6, 0);
+// var PROMPT = `The [BLANK] works as a [blank] but wishes for [blank].`
+// /// this needs to run on button click, use string variables to blank in the form
+// var PROMPT = promptInput.value()
 
-// // Draw the box
-// const boxElement = document.createElement('div');
-// boxElement.className = 'bounding-box';
-// Object.assign(boxElement.style, {
-// borderColor: color,
-// left: 100 * xmin + '%',
-// top: 100 * ymin + '%',
-// width: 100 * (xmax - xmin) + '%',
-// height: 100 * (ymax - ymin) + '%',
-// })
+// var blanksArray = ["mother", "father", "sister", "brother"]
+// // for num of blanks put in list
+// var blanksArray = [`${blankAResult}`, `${blankBResult}`, `${blankCResult}`]
 
-//
-//
-//
-// labelElement.className = 'bounding-box-label';
-// labelElement.style.backgroundColor = color;
+//Error: Server Xenova/distilgpt2 does not seem to support chat completion. Error: HfApiJson(Deserialize(Error("unknown variant `transformers.js`, expected one of `text-generation-inference`, `transformers`, `allennlp`, `flair`, `espnet`, `asteroid`, `speechbrain`, `timm`, `sentence-transformers`, `spacy`, `sklearn`, `stanza`, `adapter-transformers`, `fasttext`, `fairseq`, `pyannote-audio`, `doctr`, `nemo`, `fastai`, `k2`, `diffusers`, `paddlenlp`, `mindspore`, `open_clip`, `span-marker`, `bertopic`, `peft`, `setfit`", line: 1, column: 397)))
 
-//
-// imageContainer.appendChild(boxElement);
-// }
```
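For comparison, the "inference API version" referenced in the comments above would take roughly this shape with the Hugging Face inference client; the import path, the `runChatModel` name, the token handling, and the model id are all assumptions for illustration, and the hosted model must support chat completion (which the error note above says Xenova/distilgpt2 does not):

```javascript
import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/inference/+esm';

// An access token is optional for some hosted models but required for gated ones.
const inference = new HfInference();

async function runChatModel(PREPROMPT, PROMPT) {
  // Illustrative model id: a hosted model that supports chat completion.
  const out = await inference.chatCompletion({
    model: 'mistralai/Mistral-7B-Instruct-v0.2',
    messages: [{ role: 'user', content: PREPROMPT + PROMPT }],
    max_tokens: 150,
  });
  return out.choices[0].message.content;
}
```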
tutorial.mdx
CHANGED

````diff
@@ -31,43 +31,60 @@ When you're using a chatbot to generate code or an email, it's easy to imagine i
 
 Unfortunately, the sleek chatbot interface hides all the decision-making that leads to a prompt output. To glimpse the differences, we can test many variations by making our own tool. With our tool, we can hope to understand more about the underlying assumptions contained in the training dataset. That gives us more information to decide how we select and use these models – and for which contexts.
 
-Steps
+## Steps
 
-1. Make a copy of your toolkit prototype from [Tutorial One]([XXX]) and rename it "Critical AI Prompt Battle" to follow along.
+#### 1. Make a copy of your toolkit prototype from [Tutorial One]([XXX]) and rename it "Critical AI Prompt Battle" to follow along.
 
-
+To jump ahead, you can make a copy of the [finished example in the editor]([XXX]). But we really encourage you to type along with us!
 
-
+#### X. Import the Hugging Face library for working with Transformer models.
+
+Put this code at the top of `sketch.js`:
 
 ```javascript
-promptInput,
-modelDisplay,
-modelResult;
+import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
 ```
+The import statement brings in a library (or module), and the curly braces specify which functions we want from it, in case we don't want to import the entire thing. It also brings those functions into this file's namespace, so later we can refer to them without the library name in front – but it also means we should not name any other variables or functions the same thing. More information on importing: [Modules]([XXX]).
+
````
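Concretely, because `pipeline` was imported by name, later code can call it directly, with no library prefix; a two-line sketch (the model id here is only a placeholder):

```javascript
// `pipeline` is callable directly because it was imported by name above;
// avoid declaring another variable called `pipeline` in this file.
const pipe = await pipeline('text-generation', 'Xenova/distilgpt2');
```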
````diff
+#### X. Create global variables to use later.
 
+Declare these variables at the top of your script so that they can be referenced in multiple functions throughout the project:
+
+```javascript
+var promptInput // will be a field for entering a text value
+var blankArray = [] // will be an array holding a list of text values from multiple fields
+```
 
+#### X. Write instructions for your model.
+
 Set PREPROMPT = `Return an array of sentences. In each sentence, fill in the [BLANK] in the following sentence with each word I provide in the array ${blankArray}. Replace any [FILL] with an appropriate word of your choice.`
 
+// A [BLANK] family travels to [FILL] and on the way encounters...
+
````
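Written out as code, that pre-prompt is just a template literal; this sketch assumes `blankArray` has already been filled from the form before the string is built:

```javascript
// ${blankArray} interpolates the user-provided words into the instructions,
// so populate blankArray from the form fields before building this string.
const PREPROMPT = `Return an array of sentences. In each sentence, fill in the [BLANK] in the following sentence with each word I provide in the array ${blankArray}. Replace any [FILL] with an appropriate word of your choice.`;
```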
````diff
+#### X. [PSEUDOCODE] Add async function runModel() wrapping HF API await. {// explain link to await} explain
+
+#### X. [PSEUDOCODE] Add model results processing with await
+
````
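A minimal sketch of these two steps, modeled on the pipeline call in sketch.js above; the model id is a stand-in, and it assumes the Transformers.js text-generation pipeline, which resolves to an array of objects with a `generated_text` field:

```javascript
async function runModel(PREPROMPT, PROMPT) {
  // Load a text-generation pipeline; 'Xenova/distilgpt2' stands in for
  // whichever model the tool is currently set to use.
  let pipe = await pipeline('text-generation', 'Xenova/distilgpt2');

  // Note: `pipe((PREPROMPT, PROMPT), ...)` would trigger JavaScript's comma
  // operator and send only PROMPT, so concatenate the two strings instead.
  let out = await pipe(PREPROMPT + PROMPT, {
    max_new_tokens: 150
  });

  // Process the results: the awaited pipeline returns an array of objects.
  let modelResult = out[0].generated_text;
  console.log(modelResult);
  return modelResult;
}
```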
````diff
+#### X. [PSEUDOCODE] Create makeInterface() and add features
 
+#### X. [PSEUDOCODE] Connect form, test with console.log
 
+#### X. [PSEUDOCODE] Connect model results, send model results to interface
 
````
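One way these three steps could fit together, sketched against the p5.js instance that sketch.js already wraps; every field and element name here is illustrative:

```javascript
function makeInterface() {
  // form features: a prompt field, a field for one blank, and a submit button
  promptInput = p5.createInput('');
  const blankA = p5.createInput('');
  const submitButton = p5.createButton('submit');
  modelDisplay = p5.createElement('p', '...'); // model results will appear here

  submitButton.mousePressed(async () => {
    // connect the form: read the fields and check the wiring first
    const PROMPT = promptInput.value();
    blankArray = [blankA.value()];
    console.log(PROMPT, blankArray);

    // connect the model results: run the model, send the output to the page
    const modelResult = await runModel(PREPROMPT, PROMPT);
    modelDisplay.html(modelResult);
  });
}
```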
````diff
+#### X. [PSEUDOCODE] Test with simple example.
 
+A basic prompt may include WHAT/WHO is described, WHERE they are, WHAT they're doing, perhaps also describing HOW. When writing your prompt, replace one of these aspects with [BLANK] so that you instruct the model to fill it in iteratively with the words you provide (Morgan 2022, Gero 2023). Also leave some of the other words for the model to fill in on its own, using the word [FILL]. We instructed the model to replace these on its own in the PREPROMPT.
 
````
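For instance, the commented-out test values in sketch.js suggest a first example along these lines:

```javascript
// [BLANK] is swapped for each word in blankArray; [FILL] is left to the model.
const PROMPT = `The [BLANK] works as a [FILL] but wishes for [FILL].`;
blankArray = ["mother", "father", "sister", "brother"];
```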
````diff
+#### X. [PSEUDOCODE] Test with more complex example (add a model, add a field)
 
+#### X. [PSEUDOCODE] Add a model to the tool.
 
+You can change which model your tool works with by editing README.md and sketch.js.
+Search the list of models available.
 
````
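Concretely, the model id changes in two places, as the diffs above show; the id below is only an example:

```javascript
// in sketch.js: swap the id passed to pipeline()
let pipe = await pipeline('text-generation', 'Xenova/llama-68m');
// in README.md: list the same id under `models:` in the front matter
```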
````diff
+#### X. [PSEUDOCODE] Make a list of topics that interest you to try with your tool.
 
+Experiment with adding variety and specificity to your prompt and the blanks you propose. Try different sentence structures and topics.
 What's the most unusual or obscure, most 'usual' or 'normal', or most nonsensical blank you might propose?
 Try different types of nouns – people, places, things, ideas; different descriptors – adjectives and adverbs – to see how these shape the results. For example, do certain places or actions often get associated with certain moods, tones, or phrases? Where are these based on outdated or stereotypical assumptions?
 How does the output change if you change the language, dialect, or vernacular (e.g. slang versus business phrasing)? (Atairu 2024).
````