Sarah Ciston committed
Commit cc195e8
1 parent: f0acf12

try inference version

Files changed (2):
  1. sketch.js +69 -73
  2. tutorial.md +2 -2
sketch.js CHANGED
@@ -1,12 +1,12 @@
  // connect to API via module

  // import { AutoTokenizer, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
- import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
- import { oauthLoginUrl, oauthHandleRedirectIfPresent } from 'https://esm.sh/@huggingface/hub';
+ // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

+ /// AUTHORIZATION

+ import { oauthLoginUrl, oauthHandleRedirectIfPresent } from 'https://esm.sh/@huggingface/hub';

- /// AUTHORIZATION
  const oauthResult = await oauthHandleRedirectIfPresent();

  if (!oauthResult) {
@@ -19,15 +19,13 @@ if (!oauthResult) {
  const HFAUTH = oauthResult.accessToken
  console.log(HFAUTH)

- // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
- // import { HfInference } from 'https://esm.sh/@huggingface/inference';
- // const inference = new HfInference(HFAUTH);
+ import { HfInference } from 'https://esm.sh/@huggingface/inference';
+ const inference = new HfInference(HFAUTH);

  // PIPELINE MODELS
  // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
  // list of models by task: 'https://huggingface.co/docs/transformers.js/index#supported-tasksmodels'

-
  // Since we will download the model from the Hugging Face Hub, we can skip the local model check
  // env.allowLocalModels = false;

@@ -45,8 +43,6 @@ var blanksArray = []
  // const detector = await pipeline('text-generation', 'meta-llama/Meta-Llama-3-8B', 'Xenova/LaMini-Flan-T5-783M');


-
-
  ///// p5 STUFF

  // create an instance of the p5 class as a workspace for all your p5.js code
@@ -175,35 +171,35 @@ new p5(function (p5) {
  ///// MODEL STUFF


- // async function runModel(PREPROMPT, PROMPT){
- // // inference API version
+ async function runModel(PREPROMPT, PROMPT){
+ // inference API version

- // let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
- // // let MODELNAME = "openai-community/gpt2"
- // // let MODELNAME = 'mistral_inference'
+ let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
+ // let MODELNAME = "openai-community/gpt2"
+ // let MODELNAME = 'mistral_inference'

- // let out = await inference.textGeneration({
- // model: MODELNAME,
- // messages: [{
- // role: "system",
- // content: PREPROMPT
- // },{
- // role: "user",
- // content: PROMPT
- // }],
- // max_new_tokens: 150
- // });
+ let out = await inference.textGeneration({
+ model: MODELNAME,
+ messages: [{
+ role: "system",
+ content: PREPROMPT
+ },{
+ role: "user",
+ content: PROMPT
+ }],
+ max_new_tokens: 128
+ });

- // console.log(out)
+ console.log(out)

- // // modelResult = await out.messages[0].content
+ // modelResult = await out.messages[0].content

- // var modelResult = await out.choices[0].message.content
- // // var modelResult = await out[0].generated_text
- // console.log(modelResult);
+ var modelResult = await out.choices[0].message.content
+ // var modelResult = await out[0].generated_text
+ console.log(modelResult);

- // return modelResult
- // }
+ return modelResult
+ }


  //inference.fill_mask({
@@ -234,57 +230,57 @@ new p5(function (p5) {



- async function runModel(PREPROMPT, PROMPT){
- // // pipeline version
+ // async function runModel(PREPROMPT, PROMPT){
+ // // // pipeline version

- // let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
- let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
+ // // let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
+ // let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"

- // HFAUTH
+ // // HFAUTH

- // 'meta-llama/Meta-Llama-3-70B-Instruct'
- // 'openai-community/gpt2'
- // 'Xenova/gpt-3.5-turbo'
- // , 'Xenova/distilgpt2'
- // 'mistralai/Mistral-7B-Instruct-v0.2'
- // 'HuggingFaceH4/zephyr-7b-beta'
+ // // 'meta-llama/Meta-Llama-3-70B-Instruct'
+ // // 'openai-community/gpt2'
+ // // 'Xenova/gpt-3.5-turbo'
+ // // , 'Xenova/distilgpt2'
+ // // 'mistralai/Mistral-7B-Instruct-v0.2'
+ // // 'HuggingFaceH4/zephyr-7b-beta'

- // pipeline/transformers version
- let pipe = await pipeline('text-generation', {
- model: MODELNAME,
- accessToken: HFAUTH
- });
- // seems to work with default model distilgpt2 ugh
+ // // pipeline/transformers version
+ // let pipe = await pipeline('text-generation', {
+ // model: MODELNAME,
+ // accessToken: HFAUTH
+ // });
+ // // seems to work with default model distilgpt2 ugh


- // let out = await pipe(inputText, {
- // max_tokens: 250,
- // return_full_text: false
- // // repetition_penalty: 1.5,
- // // num_return_sequences: 1 //must be 1 for greedy search
- // })
+ // // let out = await pipe(inputText, {
+ // // max_tokens: 250,
+ // // return_full_text: false
+ // // // repetition_penalty: 1.5,
+ // // // num_return_sequences: 1 //must be 1 for greedy search
+ // // })

- // let inputText = PREPROMPT + PROMPT
+ // // let inputText = PREPROMPT + PROMPT

- // let out = await pipe(inputText)
+ // // let out = await pipe(inputText)

- let out = await pipe({
- messages: [{
- role: "system",
- content: PREPROMPT
- },{
- role: "user",
- content: PROMPT
- }],
- max_new_tokens: 100
- });
+ // let out = await pipe({
+ // messages: [{
+ // role: "system",
+ // content: PREPROMPT
+ // },{
+ // role: "user",
+ // content: PROMPT
+ // }],
+ // max_new_tokens: 100
+ // });

- console.log(out)
+ // console.log(out)

- var modelResult = await out.choices[0].message.content
- // var modelResult = await out[0].generated_text
- console.log(modelResult)
+ // var modelResult = await out.choices[0].message.content
+ // // var modelResult = await out[0].generated_text
+ // console.log(modelResult)

- return modelResult
+ // return modelResult

- }
+ // }
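
For reference, the documented call shape of textGeneration in @huggingface/inference differs from the draft committed above: it takes a single inputs string rather than a messages array, and it resolves to { generated_text }, whereas out.choices[0].message.content is the shape of a chat-completion response. Below is a minimal sketch under those assumptions, not part of the commit; it reuses the model name and import from the diff, and the prompt concatenation is illustrative only.

// Minimal sketch, not from the commit: documented textGeneration usage.
import { HfInference } from 'https://esm.sh/@huggingface/inference';

const HFAUTH = 'hf_...'; // placeholder; in the sketch this comes from the OAuth redirect
const inference = new HfInference(HFAUTH);

async function runModel(PREPROMPT, PROMPT) {
  const out = await inference.textGeneration({
    model: 'HuggingFaceH4/zephyr-7b-beta',
    inputs: PREPROMPT + '\n' + PROMPT, // textGeneration expects a string input
    parameters: { max_new_tokens: 128, return_full_text: false }
  });
  return out.generated_text; // response shape: { generated_text }
}

If a chat-style messages array is the goal, newer releases of the client expose a chatCompletion method whose response does carry choices[0].message.content, which may be why the committed draft logs out before settling on an access path.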
tutorial.md CHANGED
@@ -21,7 +21,7 @@ Part 3: [Training Dataset Explorer]
  Part 4: [Machine Learning Model Inspector & Poetry Machine]
  Part 5: [Putting Critical Tools into Practice]

- The code and content in this tutorial build on information from the prior tutorial to start creating your first tool for your p5.js Critical AI Kit. It also builds on fantastic work on critical prompt programming by Yasmin Morgan (2022), Katy Gero (2023), and Minne Atairu (2024).
+ The code and content in this tutorial build on information from the prior tutorial to start creating your first tool for your p5.js Critical AI Kit. It also builds on fantastic work on critical prompt programming by Yasmin Morgan (2022), Katy Gero et al. (2024), and Minne Atairu (2024).

  ## Why compare prompts?

@@ -129,7 +129,7 @@ Consider making it a habit to add text like "AI generated" to the title of any c

  ## References

- > Ref Katy's project (Gero 2023).
+ Gero, Katy Ilonka, Chelse Swoopes, Ziwei Gu, Jonathan K. Kummerfeld, and Elena L. Glassman. 2024. "Supporting Sensemaking of Large Language Model Outputs at Scale." Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI '24), Article 838, 1–21. https://doi.org/10.1145/3613904.3642139

  Morgan, Yasmin. 2022. "AIxDesign Icebreakers, Mini-Games & Interactive Exercises." https://aixdesign.co/posts/ai-icebreakers-mini-games-interactive-exercises

135