Commit 01da88d by Arsala Grey: Added new HF zephyr model
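A small Vue 3 app that streams text generation from the Hugging Face Inference API; this change adds HuggingFaceH4/zephyr-7b-beta to the selectable model list.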
// HfInference gives streaming access to the Hugging Face Inference API.
import { HfInference } from "https://cdn.skypack.dev/@huggingface/inference@latest";

// Vue is loaded as a global (e.g. via a CDN <script> tag), so the Composition API
// helpers are destructured from it rather than imported.
const { createApp, ref, onMounted, computed } = Vue;

const app = createApp({
  setup() {
    // The API token is persisted in localStorage so it survives page reloads.
    const token = ref(localStorage.getItem("token") || "");
    const userPrompt = ref("Write about the difference between Data Science and AI Engineering.");
    const currentGeneratedText = ref("");
    const models = ref([
      "HuggingFaceH4/zephyr-7b-beta",
      "mistralai/Mistral-7B-v0.1",
      "google/flan-t5-xxl",
    ]);
    const selectedModel = ref("");
    const isRunning = ref(false);
    const responseLength = ref("150");
    const temperature = ref("1.0");
    const generating = ref(false);
    let controller;

    const statusMessage = computed(() => {
      if (generating.value) return "Generating...";
      return "Ready";
    });
    // Open a token-by-token stream from the Inference API for the selected model.
    const createTextGenerationStream = (hfInstance, prompt, abortControllerSignal) => {
      return hfInstance.textGenerationStream(
        {
          model: selectedModel.value,
          inputs: prompt,
          parameters: {
            max_new_tokens: parseInt(responseLength.value, 10),
            temperature: parseFloat(temperature.value),
          },
        },
        {
          use_cache: false,
          signal: abortControllerSignal,
        }
      );
    };
    // Async generator that yields the accumulated text after every new token.
    const generateTextStream = async function* (hfInstance, abortSignal, prompt) {
      let generatedText = "";
      generating.value = true;
      try {
        for await (const output of createTextGenerationStream(hfInstance, prompt, abortSignal)) {
          generatedText += output.token.text;
          yield generatedText;
        }
      } finally {
        // Reset the flag even when the stream is aborted or throws.
        generating.value = false;
      }
    };
    const run = async () => {
      isRunning.value = true;
      currentGeneratedText.value = "";
      controller = new AbortController();
      localStorage.setItem("token", token.value);
      const hfInstance = new HfInference(token.value);
      try {
        for await (const textStream of generateTextStream(
          hfInstance,
          controller.signal,
          userPrompt.value
        )) {
          currentGeneratedText.value = textStream;
        }
      } catch (e) {
        // An AbortError lands here when the user clicks Stop.
        console.error(e);
      } finally {
        isRunning.value = false;
      }
    };
    const stop = () => {
      if (controller) {
        controller.abort();
      }
      isRunning.value = false;
    };
    onMounted(() => {
      // `token` is already initialized from localStorage above,
      // so only the default model needs to be set here.
      selectedModel.value = models.value[0];
    });
    return {
      token,
      userPrompt,
      currentGeneratedText,
      run,
      stop,
      models,
      selectedModel,
      isRunning,
      responseLength,
      temperature,
      statusMessage,
    };
  },
});
app.mount("#app");
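For reference, here is a minimal sketch of the markup this component could mount into. The original template is not part of this change, so the layout and labels below are assumptions; only the #app id and the binding names come from the setup() return value above.

<!-- Hypothetical template: element structure is illustrative,
     but every bound name exists in the component above. -->
<div id="app">
  <input v-model="token" type="password" placeholder="Hugging Face API token" />
  <select v-model="selectedModel">
    <option v-for="model in models" :key="model" :value="model">{{ model }}</option>
  </select>
  <textarea v-model="userPrompt"></textarea>
  <label>Max tokens <input v-model="responseLength" type="number" /></label>
  <label>Temperature <input v-model="temperature" type="number" step="0.1" /></label>
  <button @click="run" :disabled="isRunning">Run</button>
  <button @click="stop" :disabled="!isRunning">Stop</button>
  <p>{{ statusMessage }}</p>
  <pre>{{ currentGeneratedText }}</pre>
</div>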