use follow count through overview api
- app/actions/roast.ts +74 -42
- utils/roast.ts +52 -46
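The gist of the change, as a minimal standalone sketch: the follower count is now read from the single user overview endpoint instead of being derived from separate follower/following list requests. This assumes only the numFollowers field that the action below actually reads; the endpoint may return more.

// Hypothetical sketch, not part of the diff: read the follower count from
// the overview endpoint in one request.
async function getFollowerCount(username: string): Promise<number> {
  const res = await fetch(
    `https://huggingface.co/api/users/${username}/overview`
  );
  const user = await res.json();
  return user.numFollowers; // field name as used in app/actions/roast.ts below
}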
app/actions/roast.ts
CHANGED
(Removed: the extra fetches for the followers and following lists, together with their followersResponse / followingResponse parsing, since the follow count now comes from the user overview endpoint; the other removed lines are earlier versions of code that is re-added below with normalized formatting.)

The updated file:
"use server";

import { AutoTokenizer } from "@xenova/transformers";
import { HfInference } from "@huggingface/inference";

import { formatInformations, transformForInference } from "@/utils/roast";
import { FormProps } from "@/components/form";

import prisma from "@/utils/prisma";

const MODEL_ID = "meta-llama/Meta-Llama-3.1-70B-Instruct";

export async function roast({ username, language }: FormProps) {
  const userResponse = await fetch(
    `https://huggingface.co/api/users/${username}/overview`
  );
  const user = await userResponse.json();
  if (!user || user.error) {
    return {
      error: user.error ?? "Something wrong happened, please retry.",
      status: 404,
    };
  }

  if (!username) {
    // (unchanged line collapsed in the diff)
  }

  const requests = Promise.all([
    await fetch(
      `https://huggingface.co/api/spaces?author=${username}&sort=likes&limit=300&full=false&l`
    ),
    await fetch(
      `https://huggingface.co/api/models?author=${username}&sort=downloads&limit=300&full=false`
    ),
    await fetch(
      `https://huggingface.co/api/collections?owner=${username}&limit=100&sort=upvotes&full=false`
    ),
  ]);

  const [spacesResponse, modelsResponse, collectionsResponse] = await requests;
  const [spaces, models, collections] = await Promise.all([
    spacesResponse.json(),
    modelsResponse.json(),
    collectionsResponse.json(),
  ]);
  const [spacesLikes, modelsLikes] = [spaces, models].map((items) =>
    items.reduce((acc: number, item: any) => acc + item.likes, 0)
  );
  const collectionsUpvotes = collections?.reduce(
    (acc: number, item: any) => acc + item.upvotes,
    0
  );

  const datas = formatInformations(
    user,
    spaces,
    models,
    collections,
    spacesLikes,
    modelsLikes,
    collectionsUpvotes
  );
  const chat = transformForInference(
    datas,
    language,
    user.fullname ?? username
  );

  const hf = new HfInference(process.env.HF_ACCESS_TOKEN);
  const tokenizer = await AutoTokenizer.from_pretrained(
    "philschmid/meta-llama-3-tokenizer"
  );

  const formattedPrompt = tokenizer.apply_chat_template(chat, {
    tokenize: false,
    add_generation_prompt: true,
  });
  const res = await hf.textGeneration(
    {
      model: MODEL_ID,
      inputs: formattedPrompt as string,
      parameters: {
        return_full_text: false,
        max_new_tokens: 1024,
        stop_sequences: ["<|end|>", "<|endoftext|>", "<|assistant|>"],
      },
    },
    {
      use_cache: false,
    }
  );

  return {
    data: res.generated_text,
  };
}

export async function getRoast({ id }: { id: string }) {
  const roast = await prisma.quote.findUnique({
    where: {
      id,
    },
  });

  if (!roast) {
    return {
      error: "Roast not found",
      status: 404,
    };
  }

  return {
    data: roast,
  };
}
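For reference, a hedged sketch of how a client might call this server action. The handler name and surrounding wiring are illustrative; only roast's argument and return shapes come from the code above.

// Illustrative caller, not part of the diff: submit a username and either
// print the generated roast or surface the error/status pair.
import { roast } from "@/app/actions/roast";
import { Language } from "@/components/form";

export async function handleRoastSubmit(username: string, language: Language) {
  const result = await roast({ username, language });
  if ("error" in result) {
    console.error(result.status, result.error);
    return;
  }
  console.log(result.data); // generated_text returned by the model
}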
utils/roast.ts
CHANGED
(Removed: the countFollowing and countFollowers parameters of formatInformations, no longer needed now that the follower count is read from the user object; the remaining removed lines are earlier versions of code that is re-added below with normalized formatting.)

The updated file:
import { Language } from "@/components/form";

export const formatInformations = (
  user: any,
  spaces: any,
  models: any,
  collections: any,
  spacesLikes: number,
  modelsLikes: number,
  collectionsUpvotes: number
) => {
  const datas = {
    name: user.fullname,
    bio: user.details,
    organizations: user.orgs?.map((org: any) => ({
      name: org.fullname,
    })),
    followers: user.numFollowers,
    following: user.numFollowers,
    total_spaces_likes: spacesLikes,
    total_models_likes: modelsLikes,
    total_collections_likes: collectionsUpvotes,
    last_5_spaces: spaces
      .map((space: any) => ({
        name: space?.cardData?.title ?? space.id?.split("/")[1],
        description: space?.cardData?.short_description,
        likes_count: space.likes,
        last_modified: space.lastModified,
        created_at: space.createdAt,
      }))
      .slice(0, 5),
    last_5_models: models
      .map((model: any) => ({
        name: model.id?.split("/")[1],
        has_inference: model.inference,
        likes_count: model.likes,
        downloads_count: model.downloads,
        pipeline_tag: model.pipeline_tag,
        last_modified: model.lastModified,
        created_at: model.createdAt,
      }))
      .slice(0, 5),
    last_5_collections: collections
      .map((collection: any) => ({
        name: collection.title,
        description: collection.description,
        upvotes_count: collection.upvotes,
      }))
      .slice(0, 5),
  };

  return datas;
};

export const transformForInference = (
  datas: Record<string, any>,
  language: Language,
  username: string
) => {
  let user_content = `give a short and harsh roasting for the following hugging face profile: ${username}. Here are the details: "${JSON.stringify(
    datas
  )}"`;

  switch (language) {
    case "fr":
      user_content = `fais une courte et cruelle critique sarcastique en argot pour le profil Hugging Face suivant en français : ${username}. Voici les détails: "${JSON.stringify(
        datas
      )}"`;
      break;
    case "es":
      user_content = `haz una crítica corta y cruel para el siguiente perfil de Hugging Face en español: ${username}. Aquí están los detalles: "${JSON.stringify(
        datas
      )}"`;
      break;
  }

  const chat = [
    {
      role: "assistant",
      content:
        "You roast people hugging face account based on their bio, name, spaces, and models as harsh and spicy as possible, and keep it short.",
    },
    { role: "user", content: user_content },
  ];

  return chat;
};
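A quick way to see what these helpers produce, with made-up inputs: the field values below are illustrative, not real API data, and "fr" is assumed to be a valid Language value as the switch above suggests.

// Illustrative only: feed minimal fake data through the two helpers and
// inspect the chat array that is later passed to apply_chat_template.
import { formatInformations, transformForInference } from "@/utils/roast";

const fakeUser = {
  fullname: "Jane Doe",
  details: "Tinkers with diffusion models",
  orgs: [{ fullname: "acme-ml" }],
  numFollowers: 12,
};
const datas = formatInformations(fakeUser, [], [], [], 0, 0, 0);
const chat = transformForInference(datas, "fr", fakeUser.fullname);
// chat[0] is the fixed "assistant" roasting instruction; chat[1] is the
// French user prompt embedding JSON.stringify(datas).
console.log(chat);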