Sarah Ciston committed 4af8f9e
Parent: 5fd064e
try again...blob prob?

Files changed:
- sketch.js +54 -51
- tutorial.md +8 -0
sketch.js CHANGED

@@ -172,64 +172,15 @@ new p5(function (p5) {
 
 ///// MODEL STUFF
 
-// async function runModel(PREPROMPT, PROMPT){
-// // // Chat completion API
-
-// let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
-
-// // 'meta-llama/Meta-Llama-3-70B-Instruct'
-// // 'openai-community/gpt2'
-// // 'Xenova/gpt-3.5-turbo'
-// // , 'Xenova/distilgpt2'
-// // 'mistralai/Mistral-7B-Instruct-v0.2'
-// // 'HuggingFaceH4/zephyr-7b-beta'
-
-// // pipeline/transformers version
-// let pipe = await pipeline('text-generation', MODELNAME);
-// // seems to work with default model distilgpt2 ugh
-
-
-// // let out = await pipe(inputText, {
-// //   max_tokens: 250,
-// //   return_full_text: false
-// //   // repetition_penalty: 1.5,
-// //   // num_return_sequences: 1 //must be 1 for greedy search
-// // })
-
-// // let inputText = PREPROMPT + PROMPT
-
-// // let out = await pipe(inputText)
-
-// let out = await pipe({
-//   messages: [{
-//     role: "system",
-//     content: PREPROMPT
-//   },{
-//     role: "user",
-//     content: PROMPT
-//   }],
-//   max_new_tokens: 100
-// });
-
-// console.log(out)
-
-// var modelResult = await out[0].generated_text
-// console.log(modelResult)
-
-// return modelResult
-
-// }
 
 async function runModel(PREPROMPT, PROMPT){
-  // inference API version
+  // inference API version
 
-  // let MODELNAME = "HuggingFaceH4/zephyr-7b-gemma-v0.1"
   let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
   // let MODELNAME = "openai-community/gpt2"
   // let MODELNAME = 'mistral_inference'
 
   let out = await inference.textGeneration({
-    // accessToken: HFAUTH,
     model: MODELNAME,
     messages: [{
       role: "system",

@@ -275,4 +226,56 @@
 // var blanksArray = ["mother", "father", "sister", "brother"]
 // // for num of blanks put in list
 
-//Error: Server Xenova/distilgpt2 does not seem to support chat completion. Error: HfApiJson(Deserialize(Error("unknown variant `transformers.js`, expected one of `text-generation-inference`, `transformers`, `allennlp`, `flair`, `espnet`, `asteroid`, `speechbrain`, `timm`, `sentence-transformers`, `spacy`, `sklearn`, `stanza`, `adapter-transformers`, `fasttext`, `fairseq`, `pyannote-audio`, `doctr`, `nemo`, `fastai`, `k2`, `diffusers`, `paddlenlp`, `mindspore`, `open_clip`, `span-marker`, `bertopic`, `peft`, `setfit`", line: 1, column: 397)))
+//Error: Server Xenova/distilgpt2 does not seem to support chat completion. Error: HfApiJson(Deserialize(Error("unknown variant `transformers.js`, expected one of `text-generation-inference`, `transformers`, `allennlp`, `flair`, `espnet`, `asteroid`, `speechbrain`, `timm`, `sentence-transformers`, `spacy`, `sklearn`, `stanza`, `adapter-transformers`, `fasttext`, `fairseq`, `pyannote-audio`, `doctr`, `nemo`, `fastai`, `k2`, `diffusers`, `paddlenlp`, `mindspore`, `open_clip`, `span-marker`, `bertopic`, `peft`, `setfit`", line: 1, column: 397)))
+
+
+
+
+
+// async function runModel(PREPROMPT, PROMPT){
+// // // Chat completion API
+
+// let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
+
+// // 'meta-llama/Meta-Llama-3-70B-Instruct'
+// // 'openai-community/gpt2'
+// // 'Xenova/gpt-3.5-turbo'
+// // , 'Xenova/distilgpt2'
+// // 'mistralai/Mistral-7B-Instruct-v0.2'
+// // 'HuggingFaceH4/zephyr-7b-beta'
+
+// // pipeline/transformers version
+// let pipe = await pipeline('text-generation', MODELNAME);
+// // seems to work with default model distilgpt2 ugh
+
+
+// // let out = await pipe(inputText, {
+// //   max_tokens: 250,
+// //   return_full_text: false
+// //   // repetition_penalty: 1.5,
+// //   // num_return_sequences: 1 //must be 1 for greedy search
+// // })
+
+// // let inputText = PREPROMPT + PROMPT
+
+// // let out = await pipe(inputText)
+
+// let out = await pipe({
+//   messages: [{
+//     role: "system",
+//     content: PREPROMPT
+//   },{
+//     role: "user",
+//     content: PROMPT
+//   }],
+//   max_new_tokens: 100
+// });
+
+// console.log(out)
+
+// var modelResult = await out[0].generated_text
+// console.log(modelResult)
+
+// return modelResult
+
+// }
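For context on the error comment kept in this commit: the active code passes a `messages` array to `inference.textGeneration`, while the recorded failure came from trying chat completion against a model server that does not support it. Below is a minimal sketch, not the code in this commit, of how the two request shapes differ in recent versions of the `@huggingface/inference` client; `HF_TOKEN` and the prompt arguments are placeholders.

```javascript
// Sketch only: contrasts textGeneration vs. chatCompletion request shapes
// in @huggingface/inference. HF_TOKEN is a placeholder access token.
import { HfInference } from '@huggingface/inference';

const HF_TOKEN = 'hf_xxx'; // placeholder
const inference = new HfInference(HF_TOKEN);
const MODELNAME = 'HuggingFaceH4/zephyr-7b-beta';

// textGeneration expects a single `inputs` string plus optional parameters.
async function runWithTextGeneration(PREPROMPT, PROMPT) {
  const out = await inference.textGeneration({
    model: MODELNAME,
    inputs: PREPROMPT + PROMPT,
    parameters: { max_new_tokens: 100, return_full_text: false }
  });
  return out.generated_text;
}

// chatCompletion takes a `messages` array instead, and only succeeds when the
// model's inference server supports the chat-completion task.
async function runWithChatCompletion(PREPROMPT, PROMPT) {
  const out = await inference.chatCompletion({
    model: MODELNAME,
    messages: [
      { role: 'system', content: PREPROMPT },
      { role: 'user', content: PROMPT }
    ],
    max_tokens: 100
  });
  return out.choices[0].message.content;
}
```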
tutorial.md CHANGED

@@ -211,6 +211,14 @@ Paste this code into your `sketch.js` file.
 
 Also add this to your `README.md`.
 
+```markdown
+hf_oauth: true
+hf_oauth_scopes:
+ - read-repos
+ - write-repos
+ - inference-api
+```
+
 When you next load your app, click `Authorize`
 
 ![screenshot of Hugging Face app authorization screen]()
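For orientation only: on Hugging Face Spaces these keys belong in the YAML front matter at the top of the Space's `README.md`, between the `---` markers. A sketch of how that might look is below; the title and sdk values are placeholders, not taken from this repository.

```markdown
---
title: My p5.js Space   # placeholder
sdk: static             # placeholder; keep whatever the Space already declares
hf_oauth: true
hf_oauth_scopes:
 - read-repos
 - write-repos
 - inference-api
---
```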