slug (string, length 15) | content (list, 1-129 items) | rawContent (string, 1-2k chars) | author (dict) | attachments (list, 0-49 items) | mentions (list, 0-49 items) | reactions (list, 0-12 items) | publishedAt (string, length 24) | updatedAt (string, length 24) | commentators (list, 0-47 items) | url (string, 25-46 chars) | totalUniqueImpressions (int64, 1-41.5k) | numComments (int64, 0-621)
---|---|---|---|---|---|---|---|---|---|---|---|---
954424433836290 | [
{
"type": "text",
"value": "Excited to share my new Gradio app featuring the impressive Llama-3.1-Storm-8B model!",
"raw": "Excited to share my new Gradio app featuring the impressive Llama-3.1-Storm-8B model!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "This app demonstrates the capabilities of Llama-3.1-Storm-8B, an 8B parameter language model created by Ashvini Kumar Jindal, Pawan Kumar Rajpoot, Ankur Parikh,@akjindal53244 ",
"raw": "This app demonstrates the capabilities of Llama-3.1-Storm-8B, an 8B parameter language model created by Ashvini Kumar Jindal, Pawan Kumar Rajpoot, Ankur Parikh,@akjindal53244 ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Key highlights of Llama-3.1-Storm-8B:",
"raw": "Key highlights of Llama-3.1-Storm-8B:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Outperforms Llama-3.1-8B-Instruct on multiple benchmarks:",
"raw": "Outperforms Llama-3.1-8B-Instruct on multiple benchmarks:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Instruction Following (IFEval): +3.93%",
"raw": "Instruction Following (IFEval): +3.93%",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Knowledge-driven QA (GPQA): +7.21%",
"raw": "Knowledge-driven QA (GPQA): +7.21%",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Reduced Hallucinations (TruthfulQA): +9%",
"raw": "Reduced Hallucinations (TruthfulQA): +9%",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Function Calling (BFCL): +7.92%",
"raw": "Function Calling (BFCL): +7.92%",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Achieves impressive results with only 8B parameters",
"raw": "Achieves impressive results with only 8B parameters",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Uses innovative techniques like self-curation and model merging",
"raw": "Uses innovative techniques like self-curation and model merging",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Try out the model yourself: ",
"raw": "Try out the model yourself: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/sagar007/lama_storm_8b",
"resource": {
"type": "space",
"id": "sagar007/lama_storm_8b",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/sagar007/lama_storm_8b",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Kudos to the creators for pushing the boundaries of smaller language models! This work makes advanced AI more accessible and efficient.",
"raw": "Kudos to the creators for pushing the boundaries of smaller language models! This work makes advanced AI more accessible and efficient.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "#AI #NLP #MachineLearning #GradioApp #Llama3",
"raw": "#AI #NLP #MachineLearning #GradioApp #Llama3",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Excited to share my new Gradio app featuring the impressive Llama-3.1-Storm-8B model!
This app demonstrates the capabilities of Llama-3.1-Storm-8B, an 8B parameter language model created by Ashvini Kumar Jindal, Pawan Kumar Rajpoot, Ankur Parikh,@akjindal53244
Key highlights of Llama-3.1-Storm-8B:
Outperforms Llama-3.1-8B-Instruct on multiple benchmarks:
Instruction Following (IFEval): +3.93%
Knowledge-driven QA (GPQA): +7.21%
Reduced Hallucinations (TruthfulQA): +9%
Function Calling (BFCL): +7.92%
Achieves impressive results with only 8B parameters
Uses innovative techniques like self-curation and model merging
Try out the model yourself: https://huggingface.co/spaces/sagar007/lama_storm_8b
Kudos to the creators for pushing the boundaries of smaller language models! This work makes advanced AI more accessible and efficient.
#AI #NLP #MachineLearning #GradioApp #Llama3 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a464cfe0de0c5c6d8b04a1/1gCs46R_bW9apQzLQUrn5.png",
"fullname": "Sagar pallai",
"name": "sagar007",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 8,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"ibrahim313",
"akhaliq",
"alielfilali01",
"Tanvir1337",
"sagar007"
],
"count": 5
},
{
"reaction": "👀",
"users": [
"John6666",
"akhaliq",
"alielfilali01"
],
"count": 3
}
] | 2024-08-28T14:04:03.000Z | 2024-08-28T14:04:03.489Z | [] | /posts/sagar007/954424433836290 | 1,240 | 0 |
500797828453050 | [
{
"type": "text",
"value": "📫 AI in the News today: ",
"raw": "📫 AI in the News today: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "X’s Grok bot now points to government website after election misinformation warnings - The Verge",
"raw": "X’s Grok bot now points to government website after election misinformation warnings - The Verge",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.theverge.com/2024/8/28/24230325/x-grok-chatbot-election-misinformation-warnings-vote",
"resource": null,
"url": null,
"href": "https://www.theverge.com/2024/8/28/24230325/x-grok-chatbot-election-misinformation-warnings-vote",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Klarna aims to halve workforce with AI-driven gains",
"raw": "Klarna aims to halve workforce with AI-driven gains",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.ft.com/content/bfd9af3d-d607-4877-9571-078ab82a837e",
"resource": null,
"url": null,
"href": "https://www.ft.com/content/bfd9af3d-d607-4877-9571-078ab82a837e",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Artificial intelligence: questioning the loss of employee autonomy - Le Monde (Google Translate)",
"raw": "Artificial intelligence: questioning the loss of employee autonomy - Le Monde (Google Translate)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www-lemonde-fr.translate.goog/emploi/article/2024/08/28/intelligence-artificielle-la-perte-d-autonomie-des-salaries-en-question_6297347_1698637.html?_x_tr_sl=fr&_x_tr_tl=en&_x_tr_hl=en-US&_x_tr_pto=wapp",
"resource": null,
"url": null,
"href": "https://www-lemonde-fr.translate.goog/emploi/article/2024/08/28/intelligence-artificielle-la-perte-d-autonomie-des-salaries-en-question_6297347_1698637.html?_x_tr_sl=fr&_x_tr_tl=en&_x_tr_hl=en-US&_x_tr_pto=wapp",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Make AI tools to reduce teacher workloads, tech companies urged - The Guardian",
"raw": "Make AI tools to reduce teacher workloads, tech companies urged - The Guardian",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.theguardian.com/education/article/2024/aug/28/make-ai-tools-to-reduce-teacher-workloads-tech-companies-urged",
"resource": null,
"url": null,
"href": "https://www.theguardian.com/education/article/2024/aug/28/make-ai-tools-to-reduce-teacher-workloads-tech-companies-urged",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Can Tech Executives Be Held Responsible for What Happens on Their Platforms?",
"raw": "Can Tech Executives Be Held Responsible for What Happens on Their Platforms?",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.nytimes.com/2024/08/28/technology/durov-telegram-liability-platforms.html",
"resource": null,
"url": null,
"href": "https://www.nytimes.com/2024/08/28/technology/durov-telegram-liability-platforms.html",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "‘Being on camera is no longer sensible’: persecuted Venezuelan journalists turn to AI - The Guardian",
"raw": "‘Being on camera is no longer sensible’: persecuted Venezuelan journalists turn to AI - The Guardian",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.theguardian.com/world/article/2024/aug/27/venezuela-journalists-nicolas-maduro-artificial-intelligence-media-election",
"resource": null,
"url": null,
"href": "https://www.theguardian.com/world/article/2024/aug/27/venezuela-journalists-nicolas-maduro-artificial-intelligence-media-election",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Read my daily newsletter here: ",
"raw": "Read my daily newsletter here: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://linkedin.com/pulse/ai-news-august-28th-2024-florent-daudens-o7mjc/",
"resource": null,
"url": null,
"href": "https://linkedin.com/pulse/ai-news-august-28th-2024-florent-daudens-o7mjc/",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 📫 AI in the News today:
X’s Grok bot now points to government website after election misinformation warnings - The Verge
https://www.theverge.com/2024/8/28/24230325/x-grok-chatbot-election-misinformation-warnings-vote
Klarna aims to halve workforce with AI-driven gains
https://www.ft.com/content/bfd9af3d-d607-4877-9571-078ab82a837e
Artificial intelligence: questioning the loss of employee autonomy - Le Monde (Google Translate)
https://www-lemonde-fr.translate.goog/emploi/article/2024/08/28/intelligence-artificielle-la-perte-d-autonomie-des-salaries-en-question_6297347_1698637.html?_x_tr_sl=fr&_x_tr_tl=en&_x_tr_hl=en-US&_x_tr_pto=wapp
Make AI tools to reduce teacher workloads, tech companies urged - The Guardian
https://www.theguardian.com/education/article/2024/aug/28/make-ai-tools-to-reduce-teacher-workloads-tech-companies-urged
Can Tech Executives Be Held Responsible for What Happens on Their Platforms?
https://www.nytimes.com/2024/08/28/technology/durov-telegram-liability-platforms.html
‘Being on camera is no longer sensible’: persecuted Venezuelan journalists turn to AI - The Guardian
https://www.theguardian.com/world/article/2024/aug/27/venezuela-journalists-nicolas-maduro-artificial-intelligence-media-election
Read my daily newsletter here: https://linkedin.com/pulse/ai-news-august-28th-2024-florent-daudens-o7mjc/ | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 364,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-28T13:31:55.000Z | 2024-08-28T13:31:55.630Z | [] | /posts/fdaudens/500797828453050 | 458 | 0 |
488681841400938 | [
{
"type": "text",
"value": "🧠Shower Thought:",
"raw": "🧠Shower Thought:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Chatbots should let users select their preferred reading speed, defined by words per minute. ",
"raw": "Chatbots should let users select their preferred reading speed, defined by words per minute. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "By dynamically adjusting batch sizes based on user-defined reading speeds, you could more effectively distribute requests, especially in large-scale distributed systems. For users preferring slower token generation, larger batches can be processed concurrently, maximising GPU throughput without compromising user experience (as these users have expressed they are indifferent to, or may even prefer, higher latency). ",
"raw": "By dynamically adjusting batch sizes based on user-defined reading speeds, you could more effectively distribute requests, especially in large-scale distributed systems. For users preferring slower token generation, larger batches can be processed concurrently, maximising GPU throughput without compromising user experience (as these users have expressed they are indifferent to, or may even prefer, higher latency). ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "For the user, for different tasks the user may prefer different reading speeds. When generating code, I want responses as quickly as possible. But when I'm bouncing ideas off an LLM, I'd prefer a more readable pace rather than a wall of text.",
"raw": "For the user, for different tasks the user may prefer different reading speeds. When generating code, I want responses as quickly as possible. But when I'm bouncing ideas off an LLM, I'd prefer a more readable pace rather than a wall of text.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Thoughts? ",
"raw": "Thoughts? ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🧠Shower Thought:
Chatbots should let users select their preferred reading speed, defined by words per minute.
By dynamically adjusting batch sizes based on user-defined reading speeds, you could more effectively distribute requests, especially in large-scale distributed systems. For users preferring slower token generation, larger batches can be processed concurrently, maximising GPU throughput without compromising user experience (as these users have expressed they are indifferent to, or may even prefer, higher latency).
For the user, for different tasks the user may prefer different reading speeds. When generating code, I want responses as quickly as possible. But when I'm bouncing ideas off an LLM, I'd prefer a more readable pace rather than a wall of text.
Thoughts?
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/651d4e73acd8e9168ac92b04/WMYCWKx9MM8Xxj8vXursD.png",
"fullname": "Jonah Ramponi",
"name": "jonah-ramponi",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-28T10:20:13.000Z | 2024-08-29T11:29:12.285Z | [] | /posts/jonah-ramponi/488681841400938 | 491 | 0 |
631240060614222 | [
{
"type": "text",
"value": "amazing leaderboard by ",
"raw": "amazing leaderboard by ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@rwightman",
"resource": null,
"url": null,
"href": null,
"user": "rwightman",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ", compare all the image backbones on various metrics against model performance ",
"raw": ", compare all the image backbones on various metrics against model performance ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "below is an example for top-k against inferred samples per second",
"raw": "below is an example for top-k against inferred samples per second",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/timm/leaderboard",
"resource": {
"type": "space",
"id": "timm/leaderboard",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/timm/leaderboard",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | amazing leaderboard by @rwightman, compare all the image backbones on various metrics against model performance
below is an example for top-k against inferred samples per second
https://huggingface.co/spaces/timm/leaderboard | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png",
"fullname": "Merve Noyan",
"name": "merve",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5520,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/bAnAlJqYEr7QnQuWG3B8p.jpeg"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1667002643224-604a5184dca2c7ac7508b849.jpeg",
"fullname": "Ross Wightman",
"name": "rwightman",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 214
}
] | [
{
"reaction": "🤗",
"users": [
"ariG23498",
"John6666",
"amyeroberts",
"DmitryRyumin",
"rwightman",
"gxkok",
"louisbrulenaudet"
],
"count": 7
}
] | 2024-08-28T10:09:09.000Z | 2024-08-28T10:09:09.938Z | [] | /posts/merve/631240060614222 | 2,258 | 0 |
841288195333865 | [
{
"type": "text",
"value": "🎮 𝗔 𝗻𝗲𝘂𝗿𝗮𝗹 𝗻𝗲𝘁𝘄𝗼𝗿𝗸 𝘀𝗶𝗺𝘂𝗹𝗮𝘁𝗲𝘀 𝗗𝗢𝗢𝗠: 𝗚𝗼𝗼𝗴𝗹𝗲 𝗿𝗲𝘀𝗲𝗮𝗿𝗰𝗵𝗲𝗿𝘀 𝗼𝗽𝗲𝗻 𝘁𝗵𝗲 𝘄𝗮𝘆 𝗳𝗼𝗿 𝗰𝗼𝗺𝗽𝗹𝗲𝘁𝗲𝗹𝘆-𝗔𝗜-𝗴𝗲𝗻𝗲𝗿𝗮𝘁𝗲𝗱 𝗴𝗮𝗺𝗲𝘀!",
"raw": "🎮 𝗔 𝗻𝗲𝘂𝗿𝗮𝗹 𝗻𝗲𝘁𝘄𝗼𝗿𝗸 𝘀𝗶𝗺𝘂𝗹𝗮𝘁𝗲𝘀 𝗗𝗢𝗢𝗠: 𝗚𝗼𝗼𝗴𝗹𝗲 𝗿𝗲𝘀𝗲𝗮𝗿𝗰𝗵𝗲𝗿𝘀 𝗼𝗽𝗲𝗻 𝘁𝗵𝗲 𝘄𝗮𝘆 𝗳𝗼𝗿 𝗰𝗼𝗺𝗽𝗹𝗲𝘁𝗲𝗹𝘆-𝗔𝗜-𝗴𝗲𝗻𝗲𝗿𝗮𝘁𝗲𝗱 𝗴𝗮𝗺𝗲𝘀!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Imagine if games were completely live-generated by an AI model : the NPCs and their dialogues, the storyline, and even the game environment. The player’s in-game actions would have a real, lasting impact on the game story.",
"raw": "Imagine if games were completely live-generated by an AI model : the NPCs and their dialogues, the storyline, and even the game environment. The player’s in-game actions would have a real, lasting impact on the game story.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "In a very exciting paper, Google researchers just gave us the first credible glimpse of this future.",
"raw": "In a very exciting paper, Google researchers just gave us the first credible glimpse of this future.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "➡️ They created GameNGen, the first neural model that can simulate a complex 3D game in real-time. They use it to simulate the classic game DOOM running at over 20 frames per second on a single TPU, with image quality comparable to lossy JPEG compression. And it feels just like the true game!",
"raw": "➡️ They created GameNGen, the first neural model that can simulate a complex 3D game in real-time. They use it to simulate the classic game DOOM running at over 20 frames per second on a single TPU, with image quality comparable to lossy JPEG compression. And it feels just like the true game!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Here's how they did it:",
"raw": "Here's how they did it:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "1. They trained an RL agent to play DOOM and recorded its gameplay sessions.",
"raw": "1. They trained an RL agent to play DOOM and recorded its gameplay sessions.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "2. They then used these recordings to train a diffusion model to predict the next frame, based on past frames and player actions.",
"raw": "2. They then used these recordings to train a diffusion model to predict the next frame, based on past frames and player actions.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "3. During inference, they use only 4 denoising steps (instead of the usual dozens) to generate each frame quickly.",
"raw": "3. During inference, they use only 4 denoising steps (instead of the usual dozens) to generate each frame quickly.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "𝗞𝗲𝘆 𝗶𝗻𝘀𝗶𝗴𝗵𝘁𝘀:",
"raw": "𝗞𝗲𝘆 𝗶𝗻𝘀𝗶𝗴𝗵𝘁𝘀:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🎮🤔 Human players can barely tell the difference between short clips (3 seconds) of the real game or the simulation",
"raw": "🎮🤔 Human players can barely tell the difference between short clips (3 seconds) of the real game or the simulation",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🧠 The model maintains game state (health, ammo, etc.) over long periods despite having only 3 seconds of effective context length",
"raw": "🧠 The model maintains game state (health, ammo, etc.) over long periods despite having only 3 seconds of effective context length",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🔄 They use \"noise augmentation\" during training to prevent quality degradation in long play sessions",
"raw": "🔄 They use \"noise augmentation\" during training to prevent quality degradation in long play sessions",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🚀 The game runs on one TPU at 20 FPS with 4 denoising steps, or 50 FPS with model distillation (with some quality loss)",
"raw": "🚀 The game runs on one TPU at 20 FPS with 4 denoising steps, or 50 FPS with model distillation (with some quality loss)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The researchers did not open source the code, but I feel like we’ve just seen a part of the future being written!",
"raw": "The researchers did not open source the code, but I feel like we’ve just seen a part of the future being written!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Their paper (exploding the upvote counter) 👉 ",
"raw": "Their paper (exploding the upvote counter) 👉 ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2408.14837",
"resource": {
"type": "paper",
"id": "2408.14837",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2408.14837",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": "Diffusion Models Are Real-Time Game Engines (2408.14837)"
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "In a similar vein, play ",
"raw": "In a similar vein, play ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@Jofthomas",
"resource": null,
"url": null,
"href": null,
"user": "Jofthomas",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "'s 'Everchanging Quest' 🎮 ",
"raw": "'s 'Everchanging Quest' 🎮 ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Jofthomas/Everchanging-Quest",
"resource": {
"type": "space",
"id": "Jofthomas/Everchanging-Quest",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Jofthomas/Everchanging-Quest",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🎮 𝗔 𝗻𝗲𝘂𝗿𝗮𝗹 𝗻𝗲𝘁𝘄𝗼𝗿𝗸 𝘀𝗶𝗺𝘂𝗹𝗮𝘁𝗲𝘀 𝗗𝗢𝗢𝗠: 𝗚𝗼𝗼𝗴𝗹𝗲 𝗿𝗲𝘀𝗲𝗮𝗿𝗰𝗵𝗲𝗿𝘀 𝗼𝗽𝗲𝗻 𝘁𝗵𝗲 𝘄𝗮𝘆 𝗳𝗼𝗿 𝗰𝗼𝗺𝗽𝗹𝗲𝘁𝗲𝗹𝘆-𝗔𝗜-𝗴𝗲𝗻𝗲𝗿𝗮𝘁𝗲𝗱 𝗴𝗮𝗺𝗲𝘀!
Imagine if games were completely live-generated by an AI model : the NPCs and their dialogues, the storyline, and even the game environment. The player’s in-game actions would have a real, lasting impact on the game story.
In a very exciting paper, Google researchers just gave us the first credible glimpse of this future.
➡️ They created GameNGen, the first neural model that can simulate a complex 3D game in real-time. They use it to simulate the classic game DOOM running at over 20 frames per second on a single TPU, with image quality comparable to lossy JPEG compression. And it feels just like the true game!
Here's how they did it:
1. They trained an RL agent to play DOOM and recorded its gameplay sessions.
2. They then used these recordings to train a diffusion model to predict the next frame, based on past frames and player actions.
3. During inference, they use only 4 denoising steps (instead of the usual dozens) to generate each frame quickly.
𝗞𝗲𝘆 𝗶𝗻𝘀𝗶𝗴𝗵𝘁𝘀:
🎮🤔 Human players can barely tell the difference between short clips (3 seconds) of the real game or the simulation
🧠 The model maintains game state (health, ammo, etc.) over long periods despite having only 3 seconds of effective context length
🔄 They use "noise augmentation" during training to prevent quality degradation in long play sessions
🚀 The game runs on one TPU at 20 FPS with 4 denoising steps, or 50 FPS with model distillation (with some quality loss)
The researchers did not open source the code, but I feel like we’ve just seen a part of the future being written!
Their paper (exploding the upvote counter) 👉 https://huggingface.co/papers/2408.14837
In a similar vein, play @Jofthomas's 'Everchanging Quest' 🎮 https://huggingface.co/spaces/Jofthomas/Everchanging-Quest | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg",
"fullname": "Aymeric Roucher",
"name": "m-ric",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 476,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/uE4lKCbAY2G3Ape-JC4uk.mp4"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64257c616d0f0f5f1dc6aa2a/WNXC2PcyDn-jt9ZY5Rbka.jpeg",
"fullname": "Joffrey THOMAS",
"name": "Jofthomas",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 83
}
] | [
{
"reaction": "🔥",
"users": [
"alielfilali01",
"John6666",
"Jofthomas",
"Joseph717171",
"prithivMLmods",
"Hrushi",
"bmorphism",
"victor",
"BoothyBoothyBoothy",
"Pomni",
"zhangsibo1129"
],
"count": 11
}
] | 2024-08-28T06:19:24.000Z | 2024-11-08T20:18:14.664Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/xrJ30F3IA11cBe3BYCLMB.jpeg",
"fullname": "dixydomangrimes",
"name": "rixy1",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/m-ric/841288195333865 | 2,113 | 1 |
397440275785544 | [
{
"type": "text",
"value": "You can run Llama405B at over 100 tokens per second for free using SambaNova's API! ",
"raw": "You can run Llama405B at over 100 tokens per second for free using SambaNova's API! ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://sambanova.ai/fast-api?api_ref=444868",
"resource": null,
"url": null,
"href": "https://sambanova.ai/fast-api?api_ref=444868",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I have been able to generate some high quality synthetic data and use it as an LLM as a judge instead of the slower and more expensive alternatives like openAI or Anthropic. ",
"raw": "I have been able to generate some high quality synthetic data and use it as an LLM as a judge instead of the slower and more expensive alternatives like openAI or Anthropic. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | You can run Llama405B at over 100 tokens per second for free using SambaNova's API! https://sambanova.ai/fast-api?api_ref=444868
I have been able to generate some high quality synthetic data and use it as an LLM as a judge instead of the slower and more expensive alternatives like openAI or Anthropic.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/zxdZvpuAP6qEhk3vyRO3_.jpeg",
"fullname": "Zoltan Csaki",
"name": "zolicsaki",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 30,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🚀",
"users": [
"Z01IC",
"John6666",
"zolicsaki",
"kz919",
"Saripudin"
],
"count": 5
},
{
"reaction": "🤗",
"users": [
"joseEjmendez",
"kz919",
"zolicsaki"
],
"count": 3
}
] | 2024-08-27T21:12:19.000Z | 2024-08-28T00:39:47.203Z | [
{
"avatarUrl": "/avatars/0c2378a034649dc92fbaa868e326cebb.svg",
"fullname": "gghf",
"name": "gghfez",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 12,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/zxdZvpuAP6qEhk3vyRO3_.jpeg",
"fullname": "Zoltan Csaki",
"name": "zolicsaki",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 30,
"isFollowing": false
}
] | /posts/zolicsaki/397440275785544 | 1,809 | 2 |
125833594432723 | [
{
"type": "text",
"value": "$40K in Bounties: Ultimate Jailbreaking Championship 2024",
"raw": "$40K in Bounties: Ultimate Jailbreaking Championship 2024",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🚨Ultimate Jailbreaking Championship 2024 🚨",
"raw": "🚨Ultimate Jailbreaking Championship 2024 🚨",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Hackers vs. AI in the arena. Let the battle begin!",
"raw": "Hackers vs. AI in the arena. Let the battle begin!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🏆 $40,000 in Bounties",
"raw": "🏆 $40,000 in Bounties",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🗓️ Sept 7, 2024 @ 10AM PDT",
"raw": "🗓️ Sept 7, 2024 @ 10AM PDT",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🔗Register Now: ",
"raw": "🔗Register Now: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://app.grayswan.ai/arena",
"resource": null,
"url": null,
"href": "https://app.grayswan.ai/arena",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "====",
"raw": "====",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Can you push an aligned language model to generate a bomb recipe or a fake news article? Join fellow hackers in a jailbreaking arena where you can test the boundaries of advanced LLMs.",
"raw": "Can you push an aligned language model to generate a bomb recipe or a fake news article? Join fellow hackers in a jailbreaking arena where you can test the boundaries of advanced LLMs.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "====",
"raw": "====",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The Objective",
"raw": "The Objective",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Your goal is to jailbreak as many LLMs as possible, as quickly as possible in the arena!",
"raw": "Your goal is to jailbreak as many LLMs as possible, as quickly as possible in the arena!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "====",
"raw": "====",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The Stakes",
"raw": "The Stakes",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Break a model and claim your share of the $40,000 in bounties! With various jailbreak bounties and top hacker rewards, there are plenty of opportunities to win. Winners will also receive priority consideration for employment and internship opportunities at Gray Swan AI.",
"raw": "Break a model and claim your share of the $40,000 in bounties! With various jailbreak bounties and top hacker rewards, there are plenty of opportunities to win. Winners will also receive priority consideration for employment and internship opportunities at Gray Swan AI.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "====",
"raw": "====",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Ready to rise to the challenge? Join us and show the world what you can do!",
"raw": "Ready to rise to the challenge? Join us and show the world what you can do!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "See you in the arena!",
"raw": "See you in the arena!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | $40K in Bounties: Ultimate Jailbreaking Championship 2024
🚨Ultimate Jailbreaking Championship 2024 🚨
Hackers vs. AI in the arena. Let the battle begin!
🏆 $40,000 in Bounties
🗓️ Sept 7, 2024 @ 10AM PDT
🔗Register Now: https://app.grayswan.ai/arena
====
Can you push an aligned language model to generate a bomb recipe or a fake news article? Join fellow hackers in a jailbreaking arena where you can test the boundaries of advanced LLMs.
====
The Objective
Your goal is to jailbreak as many LLMs as possible, as quickly as possible in the arena!
====
The Stakes
Break a model and claim your share of the $40,000 in bounties! With various jailbreak bounties and top hacker rewards, there are plenty of opportunities to win. Winners will also receive priority consideration for employment and internship opportunities at Gray Swan AI.
====
Ready to rise to the challenge? Join us and show the world what you can do!
See you in the arena!
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62710bd57b9f120adb36e451/xv02RE8VgayDPDE6jkwV2.png",
"fullname": "Mateusz Dziemian",
"name": "mattmdjaga",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 27,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62710bd57b9f120adb36e451/C-4w3m7w0SPUYFv5TyUuZ.jpeg"
}
] | [] | [
{
"reaction": "🚀",
"users": [
"mattmdjaga",
"unclemusclez",
"ozzaney"
],
"count": 3
},
{
"reaction": "👀",
"users": [
"John6666",
"antiven0m",
"ozzaney"
],
"count": 3
},
{
"reaction": "👍",
"users": [
"titan087",
"ozzaney"
],
"count": 2
}
] | 2024-08-27T18:17:55.000Z | 2024-08-28T18:54:41.555Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64b9f4569d8360edd98a9b62/HsPmCGBbgcrtj0VLVY_ex.png",
"fullname": "fhsp",
"name": "fhsp93",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3,
"isFollowing": false
}
] | /posts/mattmdjaga/125833594432723 | 1,973 | 1 |
360533104721820 | [
{
"type": "text",
"value": "🌐 Introducing a comprehensive dataset of subdomain statistics - ",
"raw": "🌐 Introducing a comprehensive dataset of subdomain statistics - ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/nyuuzyou/subdomains",
"resource": {
"type": "dataset",
"id": "nyuuzyou/subdomains",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/nyuuzyou/subdomains",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Dataset highlights:",
"raw": "Dataset highlights:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Monthly archives of subdomain statistics from scanner.ducks.party",
"raw": "- Monthly archives of subdomain statistics from scanner.ducks.party",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Over 20 million unique subdomains per month",
"raw": "- Over 20 million unique subdomains per month",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Up to 228 million total subdomain occurrences monthly",
"raw": "- Up to 228 million total subdomain occurrences monthly",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Each entry includes: Subdomain name and occurrence count",
"raw": "- Each entry includes: Subdomain name and occurrence count",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Data spans November 2023 to July 2024",
"raw": "- Data spans November 2023 to July 2024",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Reflects websites accessible from Russia",
"raw": "- Reflects websites accessible from Russia",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Ideal for researchers studying web infrastructure, domain name patterns, and internet trends. Can also be valuable for cybersecurity analysis and web crawling optimizations.",
"raw": "Ideal for researchers studying web infrastructure, domain name patterns, and internet trends. Can also be valuable for cybersecurity analysis and web crawling optimizations.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🌐 Introducing a comprehensive dataset of subdomain statistics - https://huggingface.co/datasets/nyuuzyou/subdomains
Dataset highlights:
- Monthly archives of subdomain statistics from scanner.ducks.party
- Over 20 million unique subdomains per month
- Up to 228 million total subdomain occurrences monthly
- Each entry includes: Subdomain name and occurrence count
- Data spans November 2023 to July 2024
- Reflects websites accessible from Russia
Ideal for researchers studying web infrastructure, domain name patterns, and internet trends. Can also be valuable for cybersecurity analysis and web crawling optimizations. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png",
"fullname": "nyuuzyou",
"name": "nyuuzyou",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 58,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"louisbrulenaudet"
],
"count": 2
}
] | 2024-08-27T18:07:37.000Z | 2024-08-27T18:07:37.116Z | [] | /posts/nyuuzyou/360533104721820 | 508 | 0 |
413560714083111 | [
{
"type": "text",
"value": " ‘AI in the News’ of the day:",
"raw": " ‘AI in the News’ of the day:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Anthropic publishes the ‘system prompts’ that make Claude tick",
"raw": "Anthropic publishes the ‘system prompts’ that make Claude tick",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- \"In its continued effort to paint itself as a more ethical, transparent AI vendor, Anthropic has published the system prompts for its latest models\"",
"raw": "- \"In its continued effort to paint itself as a more ethical, transparent AI vendor, Anthropic has published the system prompts for its latest models\"",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- They specify that “Claude cannot open URLs, links, or videos, perform facial recognition or identify or name any humans in photos.",
"raw": "- They specify that “Claude cannot open URLs, links, or videos, perform facial recognition or identify or name any humans in photos.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- \"Anthropic is exerting pressure on competitors to publish the same. We’ll have to see if the gambit works.\"",
"raw": "- \"Anthropic is exerting pressure on competitors to publish the same. We’ll have to see if the gambit works.\"",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://techcrunch.com/2024/08/26/anthropic-publishes-the-system-prompt-that-makes-claude-tick/",
"resource": null,
"url": null,
"href": "https://techcrunch.com/2024/08/26/anthropic-publishes-the-system-prompt-that-makes-claude-tick/",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "China’s tech giants splash out on AI despite US restrictions (paywall)",
"raw": "China’s tech giants splash out on AI despite US restrictions (paywall)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- \"Alibaba, Tencent and Baidu had combined capital expenditure of Rmb50bn ($7bn) in the first half, compared with Rmb23bn a year earlier. TikTok parent ByteDance (which is private) has also increased AI-related spending\"",
"raw": "- \"Alibaba, Tencent and Baidu had combined capital expenditure of Rmb50bn ($7bn) in the first half, compared with Rmb23bn a year earlier. TikTok parent ByteDance (which is private) has also increased AI-related spending\"",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Nvidia's H100 and upcoming Blackwell series are under US restrictions, but China’s tech giants can buy H20",
"raw": "- Nvidia's H100 and upcoming Blackwell series are under US restrictions, but China’s tech giants can buy H20",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Analysts expect Nvidia to ship more than 1mn of the processors to Chinese tech groups in the coming months.",
"raw": "- Analysts expect Nvidia to ship more than 1mn of the processors to Chinese tech groups in the coming months.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.ft.com/content/31bffc48-2ca7-472b-9d53-3deaad2d86ce",
"resource": null,
"url": null,
"href": "https://www.ft.com/content/31bffc48-2ca7-472b-9d53-3deaad2d86ce",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "MZ \"said it was improper for the Biden administration to have pressured Facebook to censor content in 2021 related to the coronavirus pandemic\"",
"raw": "MZ \"said it was improper for the Biden administration to have pressured Facebook to censor content in 2021 related to the coronavirus pandemic\"",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- \"At the time, Facebook’s publicly stated goal was to push millions of people toward Covid-19 vaccines. In his letter, Zuckerberg didn’t indicate whether he had changed his mind about that goal\"",
"raw": "- \"At the time, Facebook’s publicly stated goal was to push millions of people toward Covid-19 vaccines. In his letter, Zuckerberg didn’t indicate whether he had changed his mind about that goal\"",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.wsj.com/tech/mark-zuckerberg-neutral-politics-letter-election-2024-02b86372",
"resource": null,
"url": null,
"href": "https://www.wsj.com/tech/mark-zuckerberg-neutral-politics-letter-election-2024-02b86372",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Food for thought:",
"raw": "Food for thought:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Why don’t women use artificial intelligence?",
"raw": "- Why don’t women use artificial intelligence?",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.economist.com/finance-and-economics/2024/08/21/why-dont-women-use-artificial-intelligence",
"resource": null,
"url": null,
"href": "https://www.economist.com/finance-and-economics/2024/08/21/why-dont-women-use-artificial-intelligence",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Most AI avatars look female, young and attractive. Are they a passing trend or here to stay?",
"raw": "- Most AI avatars look female, young and attractive. Are they a passing trend or here to stay?",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://reutersinstitute.politics.ox.ac.uk/news/most-ai-avatars-look-female-young-and-attractive-are-they-passing-trend-or-here-stay",
"resource": null,
"url": null,
"href": "https://reutersinstitute.politics.ox.ac.uk/news/most-ai-avatars-look-female-young-and-attractive-are-they-passing-trend-or-here-stay",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | ‘AI in the News’ of the day:
Anthropic publishes the ‘system prompts’ that make Claude tick
- "In its continued effort to paint itself as a more ethical, transparent AI vendor, Anthropic has published the system prompts for its latest models"
- They specify that “Claude cannot open URLs, links, or videos, perform facial recognition or identify or name any humans in photos.”
- "Anthropic is exerting pressure on competitors to publish the same. We’ll have to see if the gambit works."
https://techcrunch.com/2024/08/26/anthropic-publishes-the-system-prompt-that-makes-claude-tick/
China’s tech giants splash out on AI despite US restrictions (paywall)
- "Alibaba, Tencent and Baidu had combined capital expenditure of Rmb50bn ($7bn) in the first half, compared with Rmb23bn a year earlier. TikTok parent ByteDance (which is private) has also increased AI-related spending"
- Nvidia's H100 and upcoming Blackwell series are under US restrictions, but China’s tech giants can buy H20
- Analysts expect Nvidia to ship more than 1mn of the processors to Chinese tech groups in the coming months.
https://www.ft.com/content/31bffc48-2ca7-472b-9d53-3deaad2d86ce
MZ "said it was improper for the Biden administration to have pressured Facebook to censor content in 2021 related to the coronavirus pandemic"
- "At the time, Facebook’s publicly stated goal was to push millions of people toward Covid-19 vaccines. In his letter, Zuckerberg didn’t indicate whether he had changed his mind about that goal"
https://www.wsj.com/tech/mark-zuckerberg-neutral-politics-letter-election-2024-02b86372
Food for thought:
- Why don’t women use artificial intelligence?
https://www.economist.com/finance-and-economics/2024/08/21/why-dont-women-use-artificial-intelligence
- Most AI avatars look female, young and attractive. Are they a passing trend or here to stay?
https://reutersinstitute.politics.ox.ac.uk/news/most-ai-avatars-look-female-young-and-attractive-are-they-passing-trend-or-here-stay | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 364,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"jeffboudier",
"alielfilali01",
"brunatrevelin",
"frimelle",
"louisbrulenaudet"
],
"count": 5
},
{
"reaction": "❤️",
"users": [
"yjernite",
"alielfilali01",
"frimelle"
],
"count": 3
},
{
"reaction": "👀",
"users": [
"yjernite",
"John6666"
],
"count": 2
}
] | 2024-08-27T17:22:31.000Z | 2024-08-27T17:22:31.381Z | [] | /posts/fdaudens/413560714083111 | 1,494 | 0 |
448480679991794 | [
{
"type": "text",
"value": "Open-Devin has been rebranded as OpenHands on GitHub. ",
"raw": "Open-Devin has been rebranded as OpenHands on GitHub. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Docs: ",
"raw": "Docs: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://docs.all-hands.dev/modules/usage/llms/local-llms",
"resource": null,
"url": null,
"href": "https://docs.all-hands.dev/modules/usage/llms/local-llms",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "GitHub: ",
"raw": "GitHub: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/All-Hands-AI/OpenHands",
"resource": null,
"url": null,
"href": "https://github.com/All-Hands-AI/OpenHands",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://arxiv.org/abs/2407.16741",
"resource": null,
"url": null,
"href": "https://arxiv.org/abs/2407.16741",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Post on X: ",
"raw": "Post on X: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://x.com/gneubig/status/1828097484599759349?s=61&t=KYwlyP7fHVw9cFnwZFzIag",
"resource": null,
"url": null,
"href": "https://x.com/gneubig/status/1828097484599759349?s=61&t=KYwlyP7fHVw9cFnwZFzIag",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Open-Devin has been rebranded as OpenHands on GitHub.
Docs: https://docs.all-hands.dev/modules/usage/llms/local-llms
GitHub: https://github.com/All-Hands-AI/OpenHands
Paper: https://arxiv.org/abs/2407.16741
Post on X: https://x.com/gneubig/status/1828097484599759349?s=61&t=KYwlyP7fHVw9cFnwZFzIag
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f43448a79c1ba4c353d0d8f/DiSygV3dn7A_OjmGVTrHD.jpeg",
"fullname": "Sugato Ray",
"name": "sugatoray",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 11,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5f43448a79c1ba4c353d0d8f/BdzhkeBuWbgi5towa34rk.jpeg"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"ar08"
],
"count": 2
}
] | 2024-08-27T16:44:02.000Z | 2024-08-31T15:40:04.599Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6627749287a39d8863764901/IhHcoEPo1TJ6pAybdQPJc.png",
"fullname": "Aritra Roy",
"name": "ar08",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
}
] | /posts/sugatoray/448480679991794 | 527 | 1 |
760274535163970 | [
{
"type": "text",
"value": "Save money on your compute bill by using LMCache to share prefix KV between 2 different vllm instances. By deploying LMCache backend along with your vLLM containers, you can share a prefix KV Cache between 2 different containers and models. It is very simple to implement into your existing stack. ",
"raw": "Save money on your compute bill by using LMCache to share prefix KV between 2 different vllm instances. By deploying LMCache backend along with your vLLM containers, you can share a prefix KV Cache between 2 different containers and models. It is very simple to implement into your existing stack. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Step 1: Pull docker images",
"raw": "Step 1: Pull docker images",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "code_fence",
"value": null,
"raw": "```\ndocker pull apostacyh/vllm:lmcache-0.1.0\n```",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": "docker pull apostacyh/vllm:lmcache-0.1.0",
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Step 2: Start vLLM + LMCache",
"raw": "Step 2: Start vLLM + LMCache",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "code_fence",
"value": null,
"raw": "```\nmodel=mistralai/Mistral-7B-Instruct-v0.2 # Replace with your model name\nsudo docker run --runtime nvidia --gpus '\"device=0\"' \\\n -v <Huggingface cache dir on your local machine>:/root/.cache/huggingface \\\n -p 8000:8000 \\\n --env \"HF_TOKEN=<Your huggingface access token>\" \\\n --ipc=host \\\n --network=host \\\n apostacyh/vllm:lmcache-0.1.0 \\\n --model $model --gpu-memory-utilization 0.6 --port 8000 \\\n --lmcache-config-file /lmcache/LMCache/examples/example-local.yaml\n```",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": "model=mistralai/Mistral-7B-Instruct-v0.2 # Replace with your model name\nsudo docker run --runtime nvidia --gpus '\"device=0\"' \\\n -v <Huggingface cache dir on your local machine>:/root/.cache/huggingface \\\n -p 8000:8000 \\\n --env \"HF_TOKEN=<Your huggingface access token>\" \\\n --ipc=host \\\n --network=host \\\n apostacyh/vllm:lmcache-0.1.0 \\\n --model $model --gpu-memory-utilization 0.6 --port 8000 \\\n --lmcache-config-file /lmcache/LMCache/examples/example-local.yaml",
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "You can add another vLLM instance as long as its on a separate GPU by simply deploying another: ",
"raw": "You can add another vLLM instance as long as its on a separate GPU by simply deploying another: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "code_fence",
"value": null,
"raw": "```\n# The second vLLM instance listens at port 8001\nmodel=mistralai/Mistral-7B-Instruct-v0.2 # Replace with your model name\nsudo docker run --runtime nvidia --gpus '\"device=1\"' \\\n -v <Huggingface cache dir on your local machine>:/root/.cache/huggingface \\\n -p 8001:8001 \\\n --env \"HF_TOKEN=<Your huggingface token>\" \\\n --ipc=host \\\n --network=host \\\n apostacyh/vllm:lmcache-0.1.0 \\\n --model $model --gpu-memory-utilization 0.7 --port 8001 \\\n --lmcache-config-file /lmcache/LMCache/examples/example.yaml\n```",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": "# The second vLLM instance listens at port 8001\nmodel=mistralai/Mistral-7B-Instruct-v0.2 # Replace with your model name\nsudo docker run --runtime nvidia --gpus '\"device=1\"' \\\n -v <Huggingface cache dir on your local machine>:/root/.cache/huggingface \\\n -p 8001:8001 \\\n --env \"HF_TOKEN=<Your huggingface token>\" \\\n --ipc=host \\\n --network=host \\\n apostacyh/vllm:lmcache-0.1.0 \\\n --model $model --gpu-memory-utilization 0.7 --port 8001 \\\n --lmcache-config-file /lmcache/LMCache/examples/example.yaml",
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "This method supports local, remote or hybrid backends so whichever vLLM deployment method you are already using should work with the LMCache container (excluding BentoML).",
"raw": "This method supports local, remote or hybrid backends so whichever vLLM deployment method you are already using should work with the LMCache container (excluding BentoML).",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "LMCache: ",
"raw": "LMCache: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/LMCache/LMCache/tree/dev",
"resource": null,
"url": null,
"href": "https://github.com/LMCache/LMCache/tree/dev",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "vLLM: ",
"raw": "vLLM: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/vllm-project/vllm",
"resource": null,
"url": null,
"href": "https://github.com/vllm-project/vllm",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Save money on your compute bill by using LMCache to share prefix KV between 2 different vllm instances. By deploying LMCache backend along with your vLLM containers, you can share a prefix KV Cache between 2 different containers and models. It is very simple to implement into your existing stack.
Step 1: Pull docker images
```
docker pull apostacyh/vllm:lmcache-0.1.0
```
Step 2: Start vLLM + LMCache
```
model=mistralai/Mistral-7B-Instruct-v0.2 # Replace with your model name
sudo docker run --runtime nvidia --gpus '"device=0"' \
-v <Huggingface cache dir on your local machine>:/root/.cache/huggingface \
-p 8000:8000 \
--env "HF_TOKEN=<Your huggingface access token>" \
--ipc=host \
--network=host \
apostacyh/vllm:lmcache-0.1.0 \
--model $model --gpu-memory-utilization 0.6 --port 8000 \
--lmcache-config-file /lmcache/LMCache/examples/example-local.yaml
```
You can add another vLLM instance as long as it's on a separate GPU by simply deploying another: 
```
# The second vLLM instance listens at port 8001
model=mistralai/Mistral-7B-Instruct-v0.2 # Replace with your model name
sudo docker run --runtime nvidia --gpus '"device=1"' \
-v <Huggingface cache dir on your local machine>:/root/.cache/huggingface \
-p 8001:8001 \
--env "HF_TOKEN=<Your huggingface token>" \
--ipc=host \
--network=host \
apostacyh/vllm:lmcache-0.1.0 \
--model $model --gpu-memory-utilization 0.7 --port 8001 \
--lmcache-config-file /lmcache/LMCache/examples/example.yaml
```
This method supports local, remote or hybrid backends so whichever vLLM deployment method you are already using should work with the LMCache container (excluding BentoML).
LMCache: https://github.com/LMCache/LMCache/tree/dev
vLLM: https://github.com/vllm-project/vllm
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6455cc8d679315e4ef16fbec/M6Cfifn05BUzkCFd2QDIT.png",
"fullname": "Tim Dolan",
"name": "macadeliccc",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 152,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6455cc8d679315e4ef16fbec/IDdwe38jcdKG_6zN_m51s.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"alielfilali01",
"Kayvane",
"comarproject",
"ChavyvAkvar",
"JJiayi",
"louisbrulenaudet"
],
"count": 7
},
{
"reaction": "🔥",
"users": [
"comarproject",
"sasikiran",
"kuntai",
"JJiayi",
"anttip"
],
"count": 5
}
] | 2024-08-27T16:04:09.000Z | 2024-08-27T16:04:09.567Z | [] | /posts/macadeliccc/760274535163970 | 1,263 | 0 |
325893931242862 | [
{
"type": "text",
"value": "With the open-weight release of CogVideoX-5B from THUDM, i.e. GLM team, the Video Generation Model (how about calling it VGM) field has officially became the next booming \"LLM\" ",
"raw": "With the open-weight release of CogVideoX-5B from THUDM, i.e. GLM team, the Video Generation Model (how about calling it VGM) field has officially became the next booming \"LLM\" ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "What does the landscape look like? What are other video generation models? This collection below is all your need.",
"raw": "What does the landscape look like? What are other video generation models? This collection below is all your need.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/xianbao/video-generation-models-66c350163c74f60f5c412af6",
"resource": {
"type": "collection",
"id": "xianbao/video-generation-models-66c350163c74f60f5c412af6",
"discussionNum": null
},
"url": "https://huggingface.co/collections/xianbao/video-generation-models-66c350163c74f60f5c412af6",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The above video is generated by ",
"raw": "The above video is generated by ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@a-r-r-o-w",
"resource": null,
"url": null,
"href": null,
"user": "a-r-r-o-w",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " with CogVideoX-5B, taken from a nice lookout for the field!",
"raw": " with CogVideoX-5B, taken from a nice lookout for the field!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | With the open-weight release of CogVideoX-5B from THUDM, i.e. GLM team, the Video Generation Model (how about calling it VGM) field has officially become the next booming "LLM" 
What does the landscape look like? What are other video generation models? This collection below is all you need.
https://huggingface.co/collections/xianbao/video-generation-models-66c350163c74f60f5c412af6
The above video is generated by @a-r-r-o-w with CogVideoX-5B, taken from a nice lookout for the field! | {
"avatarUrl": "/avatars/703dd06469aaac724c94f622262b14e8.svg",
"fullname": "Tiezhen WANG",
"name": "xianbao",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 86,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d22496c58f969c152bcefd/IwcLLrKlyhyffT2zkRsix.mp4"
}
] | [
{
"avatarUrl": "/avatars/28a1fd1be363833655784c83267c9c18.svg",
"fullname": "Aryan V S",
"name": "a-r-r-o-w",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 20
}
] | [
{
"reaction": "👀",
"users": [
"John6666",
"joseEjmendez",
"a-r-r-o-w",
"victor"
],
"count": 4
},
{
"reaction": "🚀",
"users": [
"YaTharThShaRma999",
"joseEjmendez",
"a-r-r-o-w",
"victor"
],
"count": 4
},
{
"reaction": "👍",
"users": [
"Winnougan",
"victor"
],
"count": 2
}
] | 2024-08-27T14:02:25.000Z | 2024-08-27T14:02:25.406Z | [] | /posts/xianbao/325893931242862 | 1,610 | 0 |
843857941763242 | [
{
"type": "link",
"value": null,
"raw": "https://hf.co/chat/assistant/66cdd00593f62773e9346736",
"resource": null,
"url": null,
"href": "https://hf.co/chat/assistant/66cdd00593f62773e9346736",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Language recognition bot detecting emotions in English. and other languages around the world",
"raw": "Language recognition bot detecting emotions in English. and other languages around the world",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | https://hf.co/chat/assistant/66cdd00593f62773e9346736
Language recognition bot detecting emotions in English and other languages around the world | {
"avatarUrl": "/avatars/d773a7dd9b706759131fc482ab71ced7.svg",
"fullname": "[email protected]",
"name": "Taf2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 8,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64841af2295256340e4b9f88/Tj3E2t61VoExiAU665UHO.webp"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-27T13:16:09.000Z | 2024-08-27T13:16:09.845Z | [] | /posts/Taf2023/843857941763242 | 418 | 0 |
376070620637167 | [
{
"type": "text",
"value": "🆕 🚀 🏎 fast-sentence-transformers - simply, faster, sentence-transformers",
"raw": "🆕 🚀 🏎 fast-sentence-transformers - simply, faster, sentence-transformers",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Released an initial version a while ago ",
"raw": "Released an initial version a while ago ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Archived it because of a cleaner solution described in a blog by Philipp Schmid",
"raw": "Archived it because of a cleaner solution described in a blog by Philipp Schmid",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Reimplemented it based on that cleaner solution",
"raw": "Reimplemented it based on that cleaner solution",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Unarchived the project",
"raw": "Unarchived the project",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Packaged it up ",
"raw": "Packaged it up ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Released a 0.5 version",
"raw": "Released a 0.5 version",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "inline_code",
"value": null,
"raw": "`pip install fast-sentence-transformers `",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": "pip install fast-sentence-transformers ",
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/davidberenstein1957/fast-sentence-transformers",
"resource": null,
"url": null,
"href": "https://github.com/davidberenstein1957/fast-sentence-transformers",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🆕 🚀 🏎 fast-sentence-transformers - simply, faster, sentence-transformers
Released an initial version a while ago
Archived it because of a cleaner solution described in a blog by Philipp Schmid
Reimplemented it based on that cleaner solution
Unarchived the project
Packaged it up
Released a 0.5 version
`pip install fast-sentence-transformers `
https://github.com/davidberenstein1957/fast-sentence-transformers | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg",
"fullname": "David Berenstein",
"name": "davidberenstein1957",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 148,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-27T09:11:26.000Z | 2024-08-27T09:11:26.102Z | [] | /posts/davidberenstein1957/376070620637167 | 508 | 0 |
752302716328381 | [
{
"type": "text",
"value": "Pushed a new update to ",
"raw": "Pushed a new update to ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/vikhyatk/moondream2",
"resource": {
"type": "model",
"id": "vikhyatk/moondream2",
"discussionNum": null
},
"url": "https://huggingface.co/vikhyatk/moondream2",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " today. TextVQA up from 60.2 to 65.2, DocVQA up from 61.9 to 70.5. ",
"raw": " today. TextVQA up from 60.2 to 65.2, DocVQA up from 61.9 to 70.5. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Space has been updated to the new model if you want to try it out! ",
"raw": "Space has been updated to the new model if you want to try it out! ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/vikhyatk/moondream2",
"resource": {
"type": "space",
"id": "vikhyatk/moondream2",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/vikhyatk/moondream2",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Pushed a new update to https://huggingface.co/vikhyatk/moondream2 today. TextVQA up from 60.2 to 65.2, DocVQA up from 61.9 to 70.5.
Space has been updated to the new model if you want to try it out! https://huggingface.co/spaces/vikhyatk/moondream2 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63117568fa95534e218da163/8h9zN8aKRxPLBnXW7sqY9.jpeg",
"fullname": "Vik Korrapati",
"name": "vikhyatk",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 365,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/63117568fa95534e218da163/7W_30RPOohbcxV_CkIGvB.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"John6666",
"Lucky1960",
"YaTharThShaRma999",
"danielus",
"victor",
"gokaygokay",
"reach-vb",
"umair894",
"andito",
"tokorinaga",
"emiltamas",
"not-lain",
"clem"
],
"count": 13
},
{
"reaction": "🚀",
"users": [
"louisbrulenaudet",
"emiltamas",
"clem"
],
"count": 3
}
] | 2024-08-27T00:38:41.000Z | 2024-08-27T00:38:41.719Z | [] | /posts/vikhyatk/752302716328381 | 4,169 | 0 |
134261235879026 | [
{
"type": "mention",
"value": null,
"raw": "@KingNish",
"resource": null,
"url": null,
"href": null,
"user": "KingNish",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " your instant video space is down, its not working...",
"raw": " your instant video space is down, its not working...",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | @KingNish your instant video space is down, it's not working... | {
"avatarUrl": "/avatars/0a41f0b475da226a213250355d27f85f.svg",
"fullname": "Jacque Montague Raymer",
"name": "raymerjacque",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6612aedf09f16e7347dfa7e1/bPYjBXCedY_1fSIPjoBTY.jpeg",
"fullname": "Nishith Jain",
"name": "KingNish",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1072
}
] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-26T22:54:20.000Z | 2024-08-27T13:26:40.407Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6612aedf09f16e7347dfa7e1/bPYjBXCedY_1fSIPjoBTY.jpeg",
"fullname": "Nishith Jain",
"name": "KingNish",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1072,
"isFollowing": false
}
] | /posts/raymerjacque/134261235879026 | 637 | 1 |
922119214537036 | [
{
"type": "text",
"value": "This merge, this time grounded in Gemma2 9B Instruct fine-tunes, is another demonstration that models without any fine-tuning to support roleplay can still perform the function, maintaining coherence and attention to context. It should be evident that no overt fine-tuning is required for roleplay in text generation; pretraining should provide models with a requisite basic understanding of the world, so all that should be needed is some corrective fine-tuning to address observed defects in portraying the world along with datasets to promote a suitably entertaining writing style. Good Instruct tuning should promote reasoning, coherence, and attention to context.",
"raw": "This merge, this time grounded in Gemma2 9B Instruct fine-tunes, is another demonstration that models without any fine-tuning to support roleplay can still perform the function, maintaining coherence and attention to context. It should be evident that no overt fine-tuning is required for roleplay in text generation; pretraining should provide models with a requisite basic understanding of the world, so all that should be needed is some corrective fine-tuning to address observed defects in portraying the world along with datasets to promote a suitably entertaining writing style. Good Instruct tuning should promote reasoning, coherence, and attention to context.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/grimjim/Kitsunebi-v1-Gemma2-8k-9B",
"resource": {
"type": "model",
"id": "grimjim/Kitsunebi-v1-Gemma2-8k-9B",
"discussionNum": null
},
"url": "https://huggingface.co/grimjim/Kitsunebi-v1-Gemma2-8k-9B",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/grimjim/Kitsunebi-v1-Gemma2-8k-9B-GGUF",
"resource": {
"type": "model",
"id": "grimjim/Kitsunebi-v1-Gemma2-8k-9B-GGUF",
"discussionNum": null
},
"url": "https://huggingface.co/grimjim/Kitsunebi-v1-Gemma2-8k-9B-GGUF",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I opted not to incorporate the UCLA SPPO fine-tune for Gemma2 9B after observing context confusion occur with some frequency during complex scenarios.",
"raw": "I opted not to incorporate the UCLA SPPO fine-tune for Gemma2 9B after observing context confusion occur with some frequency during complex scenarios.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Thanks to Axcxept co., ltd. for fine-tuning HODACHI/EZO-Common-9B-gemma-2-it, and to Princeton NLP Group for fine-tuning princeton-nlp/gemma-2-9b-it-SimPO.",
"raw": "Thanks to Axcxept co., ltd. for fine-tuning HODACHI/EZO-Common-9B-gemma-2-it, and to Princeton NLP Group for fine-tuning princeton-nlp/gemma-2-9b-it-SimPO.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/AXCXEPT/EZO-Common-9B-gemma-2-it",
"resource": {
"type": "model",
"id": "AXCXEPT/EZO-Common-9B-gemma-2-it",
"discussionNum": null
},
"url": "https://huggingface.co/AXCXEPT/EZO-Common-9B-gemma-2-it",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/princeton-nlp/gemma-2-9b-it-SimPO",
"resource": {
"type": "model",
"id": "princeton-nlp/gemma-2-9b-it-SimPO",
"discussionNum": null
},
"url": "https://huggingface.co/princeton-nlp/gemma-2-9b-it-SimPO",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | This merge, this time grounded in Gemma2 9B Instruct fine-tunes, is another demonstration that models without any fine-tuning to support roleplay can still perform the function, maintaining coherence and attention to context. It should be evident that no overt fine-tuning is required for roleplay in text generation; pretraining should provide models with a requisite basic understanding of the world, so all that should be needed is some corrective fine-tuning to address observed defects in portraying the world along with datasets to promote a suitably entertaining writing style. Good Instruct tuning should promote reasoning, coherence, and attention to context.
https://huggingface.co/grimjim/Kitsunebi-v1-Gemma2-8k-9B
https://huggingface.co/grimjim/Kitsunebi-v1-Gemma2-8k-9B-GGUF
I opted not to incorporate the UCLA SPPO fine-tune for Gemma2 9B after observing context confusion occur with some frequency during complex scenarios.
Thanks to Axcxept co., ltd. for fine-tuning HODACHI/EZO-Common-9B-gemma-2-it, and to Princeton NLP Group for fine-tuning princeton-nlp/gemma-2-9b-it-SimPO.
https://huggingface.co/AXCXEPT/EZO-Common-9B-gemma-2-it
https://huggingface.co/princeton-nlp/gemma-2-9b-it-SimPO | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65c992424936ab38ecf706b0/aq7vuHFPO1S93fwJk0Cuq.jpeg",
"fullname": "Jim Lai",
"name": "grimjim",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 163,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"John6666",
"djuna",
"prithivMLmods"
],
"count": 3
}
] | 2024-08-26T22:36:36.000Z | 2024-08-26T22:38:55.300Z | [] | /posts/grimjim/922119214537036 | 1,633 | 0 |
856883397800186 | [
{
"type": "text",
"value": "We recently worked with OpenAI to fine-tune gpt-4o and built the SOTA model for the ",
"raw": "We recently worked with OpenAI to fine-tune gpt-4o and built the SOTA model for the ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/patched-codes/static-analysis-eval",
"resource": {
"type": "dataset",
"id": "patched-codes/static-analysis-eval",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/patched-codes/static-analysis-eval",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " benchmark. All the code and data ",
"raw": " benchmark. All the code and data ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/patched-codes/synth-vuln-fixes",
"resource": {
"type": "dataset",
"id": "patched-codes/synth-vuln-fixes",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/patched-codes/synth-vuln-fixes",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " on how we did it is available on their GitHub - ",
"raw": " on how we did it is available on their GitHub - ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/openai/build-hours/tree/main/5-4o_fine_tuning",
"resource": null,
"url": null,
"href": "https://github.com/openai/build-hours/tree/main/5-4o_fine_tuning",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Here are some tips based on our experience:",
"raw": "Here are some tips based on our experience:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "→ Establish baseline with \"conditioning\" / prompting",
"raw": "→ Establish baseline with \"conditioning\" / prompting",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "→ Task-specific datasets are ideal for PEFT; hard to beat gpt-4o on \"broad\" tasks",
"raw": "→ Task-specific datasets are ideal for PEFT; hard to beat gpt-4o on \"broad\" tasks",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "→ Add your best system prompt to each example",
"raw": "→ Add your best system prompt to each example",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "→ Ensure training data distribution is similar to inference data",
"raw": "→ Ensure training data distribution is similar to inference data",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "→ Shorten instructions with concise prompts; may require more examples.",
"raw": "→ Shorten instructions with concise prompts; may require more examples.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "→ Define clear evaluation metrics (seriously, please eval!)",
"raw": "→ Define clear evaluation metrics (seriously, please eval!)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "You can see more details on the benchmark and process here - ",
"raw": "You can see more details on the benchmark and process here - ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.patched.codes/blog/the-static-analysis-evaluation-benchmark-measuring-llm-performance-in-fixing-software-vulnerabilities",
"resource": null,
"url": null,
"href": "https://www.patched.codes/blog/the-static-analysis-evaluation-benchmark-measuring-llm-performance-in-fixing-software-vulnerabilities",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | We recently worked with OpenAI to fine-tune gpt-4o and built the SOTA model for the https://huggingface.co/datasets/patched-codes/static-analysis-eval benchmark. All the code and data https://huggingface.co/datasets/patched-codes/synth-vuln-fixes on how we did it is available on their GitHub - https://github.com/openai/build-hours/tree/main/5-4o_fine_tuning.
Here are some tips based on our experience:
→ Establish baseline with "conditioning" / prompting
→ Task-specific datasets are ideal for PEFT; hard to beat gpt-4o on "broad" tasks
→ Add your best system prompt to each example
→ Ensure training data distribution is similar to inference data
→ Shorten instructions with concise prompts; may require more examples.
→ Define clear evaluation metrics (seriously, please eval!)
You can see more details on the benchmark and process here - https://www.patched.codes/blog/the-static-analysis-evaluation-benchmark-measuring-llm-performance-in-fixing-software-vulnerabilities | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677134945205-62f32eab52ad88c930bb3f3b.png",
"fullname": "Asankhaya Sharma",
"name": "codelion",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 46,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"codelion",
"rikhoffbauer2",
"Mackin7",
"KingNish"
],
"count": 4
},
{
"reaction": "🔥",
"users": [
"codelion",
"KingNish",
"Bruhn"
],
"count": 3
},
{
"reaction": "👀",
"users": [
"John6666",
"codelion"
],
"count": 2
},
{
"reaction": "🚀",
"users": [
"codelion"
],
"count": 1
},
{
"reaction": "🤗",
"users": [
"codelion"
],
"count": 1
},
{
"reaction": "➕",
"users": [
"codelion"
],
"count": 1
}
] | 2024-08-26T20:15:23.000Z | 2024-08-26T20:16:11.224Z | [] | /posts/codelion/856883397800186 | 1,891 | 0 |
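A minimal sketch of how the fine-tuning tips above translate into a training file, assuming OpenAI's chat fine-tuning JSONL format (one "messages" object per line); the system prompt, file name, and the single example below are hypothetical placeholders, not the actual synth-vuln-fixes data:
```
# Sketch: build a chat fine-tuning JSONL file where every example carries the
# same system prompt and mirrors the inference-time distribution.
# The prompt text, file name, and sample pair are illustrative placeholders.
import json

SYSTEM_PROMPT = "You are a security engineer. Rewrite the given code to fix the vulnerability."  # hypothetical

examples = [
    {
        "input": 'query = "SELECT * FROM users WHERE id = " + user_id',
        "output": 'query = "SELECT * FROM users WHERE id = %s"  # parameterized query',
    },
    # ...more (input, output) pairs drawn from the same distribution you expect at inference
]

with open("train.jsonl", "w") as f:
    for ex in examples:
        record = {
            "messages": [
                {"role": "system", "content": SYSTEM_PROMPT},  # same system prompt on every example
                {"role": "user", "content": ex["input"]},
                {"role": "assistant", "content": ex["output"]},
            ]
        }
        f.write(json.dumps(record) + "\n")
```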
874350060176053 | [
{
"type": "text",
"value": "Tell Me About the World is based on Concepts, Relationships, and Context. This is how we as humans learn about the world. If you were to distill geometry, or philosophy, you would get: Concepts, Relationships, and Context. Using two Colab Notebooks, we demonstrate beyond any shadow of a doubt that it is possible to educate LLM models using this framework of Concepts, Relationships, and Context, and that the model actually grasps the relationships and context when we do. Explore the full code behind 'AI ABC's' and 'AI 123's' in our Colab Notebooks which are available from this video!",
"raw": "Tell Me About the World is based on Concepts, Relationships, and Context. This is how we as humans learn about the world. If you were to distill geometry, or philosophy, you would get: Concepts, Relationships, and Context. Using two Colab Notebooks, we demonstrate beyond any shadow of a doubt that it is possible to educate LLM models using this framework of Concepts, Relationships, and Context, and that the model actually grasps the relationships and context when we do. Explore the full code behind 'AI ABC's' and 'AI 123's' in our Colab Notebooks which are available from this video!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.youtube.com/watch?v=yz0sd8ayenI",
"resource": null,
"url": null,
"href": "https://www.youtube.com/watch?v=yz0sd8ayenI",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Tell Me About the World is based on Concepts, Relationships, and Context. This is how we as humans learn about the world. If you were to distill geometry or philosophy, you would get: Concepts, Relationships, and Context. Using two Colab Notebooks, we demonstrate beyond any shadow of a doubt that it is possible to educate LLMs using this framework of Concepts, Relationships, and Context, and that the model actually grasps the relationships and context when we do. Explore the full code behind 'AI ABC's' and 'AI 123's' in our Colab Notebooks, which are available from this video!
https://www.youtube.com/watch?v=yz0sd8ayenI | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 148,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🚀",
"users": [
"YaTharThShaRma999",
"linz"
],
"count": 2
},
{
"reaction": "👍",
"users": [
"theospeak",
"linz"
],
"count": 2
},
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-26T19:53:31.000Z | 2024-08-26T19:53:31.696Z | [] | /posts/TuringsSolutions/874350060176053 | 1,310 | 0 |
656132157332425 | [
{
"type": "text",
"value": "i today make Shadow Chat, that make you can Chat with Shadow the Hedgehog (i was just bored, so i make this lol)",
"raw": "i today make Shadow Chat, that make you can Chat with Shadow the Hedgehog (i was just bored, so i make this lol)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "try it now in:",
"raw": "try it now in:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Hev832/Shadow_Chat",
"resource": {
"type": "space",
"id": "Hev832/Shadow_Chat",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Hev832/Shadow_Chat",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Today I made Shadow Chat, which lets you chat with Shadow the Hedgehog (I was just bored, so I made this lol)
try it now at:
https://huggingface.co/spaces/Hev832/Shadow_Chat
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6456271e4095c967f9a93ec1/HE3FPqI5bBtGxvHs5D40z.png",
"fullname": "Rico",
"name": "Hev832",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 31,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"y11s"
],
"count": 2
},
{
"reaction": "🚀",
"users": [
"TuringsSolutions"
],
"count": 1
}
] | 2024-08-26T19:23:12.000Z | 2024-09-01T09:47:58.673Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/2H0bUbG8rtpTE4X5_W-Lr.jpeg",
"fullname": "Muhammad Sajjad Rasool",
"name": "ReySajju742",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6456271e4095c967f9a93ec1/HE3FPqI5bBtGxvHs5D40z.png",
"fullname": "Rico",
"name": "Hev832",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 31,
"isFollowing": false
},
{
"avatarUrl": "/avatars/ac3d5337bf67e9f0799bbcafc3cb0127.svg",
"fullname": "dodi",
"name": "dodiss",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/ff10d52109b801587085d5ec2551e3ab.svg",
"fullname": "Nsnsne",
"name": "Ndne",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/Hev832/656132157332425 | 1,393 | 7 |
888565266065129 | [
{
"type": "text",
"value": "Introducing ",
"raw": "Introducing ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/fal/AuraFace-v1",
"resource": {
"type": "model",
"id": "fal/AuraFace-v1",
"discussionNum": null
},
"url": "https://huggingface.co/fal/AuraFace-v1",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ": Commercially available & open source identity encoder model for next generation one shot personalization. Read more about it here: ",
"raw": ": Commercially available & open source identity encoder model for next generation one shot personalization. Read more about it here: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/isidentical/auraface",
"resource": null,
"url": null,
"href": "https://huggingface.co/blog/isidentical/auraface",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Introducing https://huggingface.co/fal/AuraFace-v1: a commercially available & open-source identity encoder model for next-generation one-shot personalization. Read more about it here: https://huggingface.co/blog/isidentical/auraface
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6380ebb8471a4550ff255c62/-5tqR0SqLU53cOsXA-4ON.jpeg",
"fullname": "Batuhan",
"name": "isidentical",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 80,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🚀",
"users": [
"gokaygokay",
"isidentical",
"tolgacangoz",
"John6666",
"not-lain",
"dsmonk",
"ClayFace",
"KingNish",
"victor",
"multimodalart",
"Nymbo",
"louisbrulenaudet",
"mkolar"
],
"count": 13
},
{
"reaction": "🔥",
"users": [
"gokaygokay",
"isidentical",
"tolgacangoz",
"KingNish",
"multimodalart",
"Bruhn"
],
"count": 6
},
{
"reaction": "🤗",
"users": [
"tolgacangoz",
"multimodalart"
],
"count": 2
},
{
"reaction": "❤️",
"users": [
"tolgacangoz",
"multimodalart"
],
"count": 2
},
{
"reaction": "🧠",
"users": [
"tolgacangoz",
"multimodalart"
],
"count": 2
},
{
"reaction": "😎",
"users": [
"tolgacangoz",
"multimodalart"
],
"count": 2
}
] | 2024-08-26T18:29:32.000Z | 2024-08-26T18:29:32.223Z | [] | /posts/isidentical/888565266065129 | 3,070 | 0 |
937747530741550 | [
{
"type": "text",
"value": "NEW COMPETITION ALERT 🚀",
"raw": "NEW COMPETITION ALERT 🚀",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Artificio/ROAM1RealWorldAdversarialAttack",
"resource": {
"type": "space",
"id": "Artificio/ROAM1RealWorldAdversarialAttack",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Artificio/ROAM1RealWorldAdversarialAttack",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | NEW COMPETITION ALERT 🚀
https://huggingface.co/spaces/Artificio/ROAM1RealWorldAdversarialAttack | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fa19f4ba13e063b8b2b5e11/nGVHdTYX2udnt-K8mqY27.jpeg",
"fullname": "Abhishek Thakur",
"name": "abhishek",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 1379,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-26T12:45:36.000Z | 2024-08-26T12:45:36.068Z | [] | /posts/abhishek/937747530741550 | 1,489 | 0 |
503366225119253 | [
{
"type": "text",
"value": "📣 New Project Alert: Phi 3.5 Multimodal AI Demo 🎉",
"raw": "📣 New Project Alert: Phi 3.5 Multimodal AI Demo 🎉",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Excited to share my latest project that combines the power of Phi 3.5 text and vision models with text-to-speech capabilities!",
"raw": "Excited to share my latest project that combines the power of Phi 3.5 text and vision models with text-to-speech capabilities!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🔑 Key Features:",
"raw": "🔑 Key Features:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "1️⃣ Phi 3.5 Text Model for dynamic conversations",
"raw": "1️⃣ Phi 3.5 Text Model for dynamic conversations",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "2️⃣ Phi 3.5 Vision Model for advanced image analysis",
"raw": "2️⃣ Phi 3.5 Vision Model for advanced image analysis",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "3️⃣ Text-to-Speech integration for an audio dimension",
"raw": "3️⃣ Text-to-Speech integration for an audio dimension",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🛠️ Tech Stack:",
"raw": "🛠️ Tech Stack:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Transformers",
"raw": "Transformers",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Gradio",
"raw": "Gradio",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "PyTorch",
"raw": "PyTorch",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Flash Attention 2",
"raw": "Flash Attention 2",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Parler TTS",
"raw": "Parler TTS",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "This project demonstrates the potential of integrating multiple AI models to create a more comprehensive and interactive user experience. It's a step towards more natural and versatile AI assistants.",
"raw": "This project demonstrates the potential of integrating multiple AI models to create a more comprehensive and interactive user experience. It's a step towards more natural and versatile AI assistants.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "👉 Check out the demo and let me know your thoughts! How would you extend this project?",
"raw": "👉 Check out the demo and let me know your thoughts! How would you extend this project?",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🔗 Demo Link: ",
"raw": "🔗 Demo Link: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/sagar007/Multimodal_App",
"resource": {
"type": "space",
"id": "sagar007/Multimodal_App",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/sagar007/Multimodal_App",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "#MultimodalAI #PhiModel #MachineLearning #AIDemo",
"raw": "#MultimodalAI #PhiModel #MachineLearning #AIDemo",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 📣 New Project Alert: Phi 3.5 Multimodal AI Demo 🎉
Excited to share my latest project that combines the power of Phi 3.5 text and vision models with text-to-speech capabilities!
🔑 Key Features:
1️⃣ Phi 3.5 Text Model for dynamic conversations
2️⃣ Phi 3.5 Vision Model for advanced image analysis
3️⃣ Text-to-Speech integration for an audio dimension
🛠️ Tech Stack:
Transformers
Gradio
PyTorch
Flash Attention 2
Parler TTS
This project demonstrates the potential of integrating multiple AI models to create a more comprehensive and interactive user experience. It's a step towards more natural and versatile AI assistants.
👉 Check out the demo and let me know your thoughts! How would you extend this project?
🔗 Demo Link: https://huggingface.co/spaces/sagar007/Multimodal_App
#MultimodalAI #PhiModel #MachineLearning #AIDemo | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a464cfe0de0c5c6d8b04a1/1gCs46R_bW9apQzLQUrn5.png",
"fullname": "Sagar pallai",
"name": "sagar007",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 8,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62a464cfe0de0c5c6d8b04a1/W0MQTYf10PADR3hxu7y8y.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-26T11:13:40.000Z | 2024-08-26T11:25:46.749Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a464cfe0de0c5c6d8b04a1/1gCs46R_bW9apQzLQUrn5.png",
"fullname": "Sagar pallai",
"name": "sagar007",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 8,
"isFollowing": false
}
] | /posts/sagar007/503366225119253 | 612 | 1 |
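For the text half of a stack like the Phi 3.5 demo above, a minimal Transformers sketch is below; the checkpoint name microsoft/Phi-3.5-mini-instruct, dtype, and generation settings are assumptions (older transformers releases may also need trust_remote_code=True), so treat it as an outline rather than the Space's actual code:
```
# Minimal text-only sketch of a Phi-3.5 chat call; not the Space's actual code.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "microsoft/Phi-3.5-mini-instruct"  # assumed checkpoint
tok = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [{"role": "user", "content": "Summarize what a multimodal assistant can do."}]
inputs = tok.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

with torch.no_grad():
    out = model.generate(inputs, max_new_tokens=128)

# Strip the prompt tokens and decode only the newly generated reply.
print(tok.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```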
500389499974376 | [
{
"type": "text",
"value": "Just crossed 200,000 free public AI datasets shared by the community on Hugging Face! Text, image, video, audio, time-series & many more... Thanks everyone!",
"raw": "Just crossed 200,000 free public AI datasets shared by the community on Hugging Face! Text, image, video, audio, time-series & many more... Thanks everyone!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "http://hf.co/datasets",
"resource": null,
"url": null,
"href": "http://hf.co/datasets",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Just crossed 200,000 free public AI datasets shared by the community on Hugging Face! Text, image, video, audio, time-series & many more... Thanks everyone!
http://hf.co/datasets | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg",
"fullname": "Clem 🤗",
"name": "clem",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1734,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5e67bdd61009063689407479/FG2GHJLDgi7REoFQOWvXo.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"ajibawa-2023",
"John6666",
"yjernite",
"KingNish",
"TheDrunkenSnail",
"Felladrin",
"Sergidev",
"fdaudens",
"osanseviero",
"kramp",
"damerajee",
"asiraja",
"Mackin7",
"Nacholmo",
"alielfilali01",
"CookieMaster",
"its5Q",
"nyuuzyou",
"Rsln",
"jsulz",
"louisbrulenaudet",
"pagezyhf"
],
"count": 22
}
] | 2024-08-26T10:37:25.000Z | 2024-08-26T10:37:44.879Z | [] | /posts/clem/500389499974376 | 4,123 | 0 |
582462020511746 | [
{
"type": "text",
"value": "Thinking about upgrading from Python 3.10 to 3.11? Here's why you should make the move - a deep technical breakdown that might convince you:",
"raw": "Thinking about upgrading from Python 3.10 to 3.11? Here's why you should make the move - a deep technical breakdown that might convince you:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ">> Performance Revolution",
"raw": ">> Performance Revolution",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The performance improvements are staggering, with benchmarks showing 10-60% faster execution across different workloads. Let me break down the game-changing features:",
"raw": "The performance improvements are staggering, with benchmarks showing 10-60% faster execution across different workloads. Let me break down the game-changing features:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ">> Core Architecture Changes",
"raw": ">> Core Architecture Changes",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Python 3.11's interpreter now uses statically allocated core modules, eliminating the multi-step loading process we've dealt with in 3.10. This means your applications will start 10-15% faster out of the gate.",
"raw": "Python 3.11's interpreter now uses statically allocated core modules, eliminating the multi-step loading process we've dealt with in 3.10. This means your applications will start 10-15% faster out of the gate.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ">> Function Optimization",
"raw": ">> Function Optimization",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The redesigned frame objects are a thing of beauty - they've been stripped of unnecessary baggage, resulting in a 3-7% speedup for all function calls. But it gets better: function calls are now inlined, giving us a 1-3% boost, with recursive functions like Fibonacci seeing up to 1.7x improvement!",
"raw": "The redesigned frame objects are a thing of beauty - they've been stripped of unnecessary baggage, resulting in a 3-7% speedup for all function calls. But it gets better: function calls are now inlined, giving us a 1-3% boost, with recursive functions like Fibonacci seeing up to 1.7x improvement!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ">> Adaptive Intelligence",
"raw": ">> Adaptive Intelligence",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The new Specializing Interpreter is perhaps the most exciting addition. Think of it as a lightweight JIT - it identifies hot code paths and optimizes them automatically.",
"raw": "The new Specializing Interpreter is perhaps the most exciting addition. Think of it as a lightweight JIT - it identifies hot code paths and optimizes them automatically.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The interpreter now automatically specializes math operations, array indexing, and even sequence unpacking based on actual usage patterns.",
"raw": "The interpreter now automatically specializes math operations, array indexing, and even sequence unpacking based on actual usage patterns.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ">> Exception Handling Revolution",
"raw": ">> Exception Handling Revolution",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "My favorite feature? Zero-cost exceptions! Your try-except blocks no longer carry overhead when no exceptions occur. The code runs at full speed until an exception actually happens.",
"raw": "My favorite feature? Zero-cost exceptions! Your try-except blocks no longer carry overhead when no exceptions occur. The code runs at full speed until an exception actually happens.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Ready to make the switch? These improvements aren't just numbers - they're real-world performance gains waiting to be unlocked in your codebase.",
"raw": "Ready to make the switch? These improvements aren't just numbers - they're real-world performance gains waiting to be unlocked in your codebase.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Thinking about upgrading from Python 3.10 to 3.11? Here's why you should make the move - a deep technical breakdown that might convince you:
>> Performance Revolution
The performance improvements are staggering, with benchmarks showing 10-60% faster execution across different workloads. Let me break down the game-changing features:
>> Core Architecture Changes
Python 3.11's interpreter now uses statically allocated core modules, eliminating the multi-step loading process we've dealt with in 3.10. This means your applications will start 10-15% faster out of the gate.
>> Function Optimization
The redesigned frame objects are a thing of beauty - they've been stripped of unnecessary baggage, resulting in a 3-7% speedup for all function calls. But it gets better: function calls are now inlined, giving us a 1-3% boost, with recursive functions like Fibonacci seeing up to 1.7x improvement!
>> Adaptive Intelligence
The new Specializing Interpreter is perhaps the most exciting addition. Think of it as a lightweight JIT - it identifies hot code paths and optimizes them automatically.
The interpreter now automatically specializes math operations, array indexing, and even sequence unpacking based on actual usage patterns.
>> Exception Handling Revolution
My favorite feature? Zero-cost exceptions! Your try-except blocks no longer carry overhead when no exceptions occur. The code runs at full speed until an exception actually happens.
Ready to make the switch? These improvements aren't just numbers - they're real-world performance gains waiting to be unlocked in your codebase. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png",
"fullname": "Kuldeep Singh Sidhu",
"name": "singhsidhukuldeep",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 197,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/1nO-0Nzi5sNg9utb5tcSO.jpeg"
}
] | [] | [
{
"reaction": "👍",
"users": [
"cocodark",
"John6666",
"Joseph717171",
"AtAndDev",
"mantrakp"
],
"count": 5
}
] | 2024-11-09T04:41:47.000Z | 2024-11-09T04:41:47.987Z | [] | /posts/singhsidhukuldeep/582462020511746 | 2,247 | 0 |
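The recursion and exception claims in the post above are easy to sanity-check yourself: run the snippet below under both python3.10 and python3.11 and compare the timings (absolute numbers depend on hardware and build flags, so expect ratios rather than the exact percentages quoted):
```
# Micro-benchmark: run under python3.10 and python3.11 and compare timings.
import sys
import timeit

def fib(n):
    # Deliberately naive recursion to stress function-call overhead.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

def with_try(x):
    # Exercises the zero-cost exception path: the except branch never runs.
    try:
        return x + 1
    except ValueError:
        return 0

print(sys.version)
print("fib(25):    ", min(timeit.repeat(lambda: fib(25), number=20, repeat=5)), "s")
print("try/except: ", min(timeit.repeat(lambda: with_try(1), number=1_000_000, repeat=5)), "s")
```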
982596282424877 | [
{
"type": "text",
"value": "Dear AI developers, have you managed to get any grants or compute powers for your research or startup?",
"raw": "Dear AI developers, have you managed to get any grants or compute powers for your research or startup?",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "If yes, from where and how? ",
"raw": "If yes, from where and how? ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Dear AI developers, have you managed to get any grants or compute power for your research or startup?
If yes, from where and how? | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/637251142f98dcc049b349de/kkRLjyaO55_nFrTNWRZFQ.jpeg",
"fullname": "Haghiri",
"name": "Muhammadreza",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 27,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-08T20:46:32.000Z | 2024-11-10T08:20:56.103Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/637251142f98dcc049b349de/kkRLjyaO55_nFrTNWRZFQ.jpeg",
"fullname": "Haghiri",
"name": "Muhammadreza",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 27,
"isFollowing": false
}
] | /posts/Muhammadreza/982596282424877 | 1,478 | 8 |
508581844814759 | [
{
"type": "text",
"value": "FLUX 1.1 [pro] Ultra : API - { 4x Higher Image Resolutions } ",
"raw": "FLUX 1.1 [pro] Ultra : API - { 4x Higher Image Resolutions } ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Upto - 4 Megapixels, 10 seconds per sample. { Hi - Res } ",
"raw": "Upto - 4 Megapixels, 10 seconds per sample. { Hi - Res } ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "{ Blog Post ⛵ } : ",
"raw": "{ Blog Post ⛵ } : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/prithivMLmods/flux-pro-endpoint",
"resource": null,
"url": null,
"href": "https://huggingface.co/blog/prithivMLmods/flux-pro-endpoint",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Endpoint Creation Step by Step: 🧵",
"raw": "Endpoint Creation Step by Step: 🧵",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-> Sign up to { api.bfl.ml } & get your api's: ",
"raw": "-> Sign up to { api.bfl.ml } & get your api's: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://api.bfl.ml/auth/profile",
"resource": null,
"url": null,
"href": "https://api.bfl.ml/auth/profile",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-> File Structure:",
"raw": "-> File Structure:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "flux_image_generation/",
"raw": "flux_image_generation/",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "├── .env ",
"raw": "├── .env ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "├── generate_image.py ",
"raw": "├── generate_image.py ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "└── requirements.txt ",
"raw": "└── requirements.txt ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-> Step 0: Add Your API Key to an Environment File",
"raw": "-> Step 0: Add Your API Key to an Environment File",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "{ .env }",
"raw": "{ .env }",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "code_fence",
"value": null,
"raw": "```\nBFL_API_KEY=your_actual_api_key_here\n```",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": "BFL_API_KEY=your_actual_api_key_here",
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-> Step 1: Install Required Libraries",
"raw": "-> Step 1: Install Required Libraries",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " { requirements.txt }",
"raw": " { requirements.txt }",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "code_fence",
"value": null,
"raw": "```\nrequests\npython-dotenv\n```",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": "requests\npython-dotenv",
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-> Step 2: Setup the Python Script ",
"raw": "-> Step 2: Setup the Python Script ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "{ generate_image.py} - ",
"raw": "{ generate_image.py} - ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/PRITHIVSAKTHIUR/Flux-API/blob/main/generate_image.py",
"resource": null,
"url": null,
"href": "https://github.com/PRITHIVSAKTHIUR/Flux-API/blob/main/generate_image.py",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-> Step3: Install the requirements & Run the Script",
"raw": "-> Step3: Install the requirements & Run the Script",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "code_fence",
"value": null,
"raw": "```\npip install -r requirements.txt\n\npython generate_image.py\n\n```",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": "pip install -r requirements.txt\n\npython generate_image.py",
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-> Polling: The script polls the API every 0.5 seconds until the image generation result is ready. That's it the script also checks for a successful response after submitting the request.",
"raw": "-> Polling: The script polls the API every 0.5 seconds until the image generation result is ready. That's it the script also checks for a successful response after submitting the request.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "For more visit: ",
"raw": "For more visit: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🔺for script: ",
"raw": "🔺for script: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/PRITHIVSAKTHIUR/Flux-API/tree/main",
"resource": null,
"url": null,
"href": "https://github.com/PRITHIVSAKTHIUR/Flux-API/tree/main",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🔺bfl doc: ",
"raw": "🔺bfl doc: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://docs.bfl.ml/quick_start/gen_image/#__tabbed_1_2",
"resource": null,
"url": null,
"href": "https://docs.bfl.ml/quick_start/gen_image/#__tabbed_1_2",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Endpoints for image generation: 🧵",
"raw": "Endpoints for image generation: 🧵",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-> /flux-pro-1.1-ultra",
"raw": "-> /flux-pro-1.1-ultra",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-> /flux-pro-1.1",
"raw": "-> /flux-pro-1.1",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-> /flux-pro",
"raw": "-> /flux-pro",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-> /flux-dev",
"raw": "-> /flux-dev",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Each ID has 50 free credits available for use, based on the cost per image sample generated by the model.",
"raw": "Each ID has 50 free credits available for use, based on the cost per image sample generated by the model.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@prithivMLmods",
"resource": null,
"url": null,
"href": null,
"user": "prithivMLmods",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " 🤗",
"raw": " 🤗",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | FLUX 1.1 [pro] Ultra : API - { 4x Higher Image Resolutions }
Up to 4 megapixels, 10 seconds per sample. { Hi - Res }
{ Blog Post ⛵ } : https://huggingface.co/blog/prithivMLmods/flux-pro-endpoint
Endpoint Creation Step by Step: 🧵
-> Sign up to { api.bfl.ml } & get your API keys: https://api.bfl.ml/auth/profile
-> File Structure:
flux_image_generation/
├── .env
├── generate_image.py
└── requirements.txt
-> Step 0: Add Your API Key to an Environment File
{ .env }
```
BFL_API_KEY=your_actual_api_key_here
```
-> Step 1: Install Required Libraries
{ requirements.txt }
```
requests
python-dotenv
```
-> Step 2: Setup the Python Script
{ generate_image.py} - https://github.com/PRITHIVSAKTHIUR/Flux-API/blob/main/generate_image.py
-> Step 3: Install the requirements & Run the Script
```
pip install -r requirements.txt
python generate_image.py
```
-> Polling: The script polls the API every 0.5 seconds until the image generation result is ready. That's it. The script also checks for a successful response after submitting the request.
For more visit:
🔺for script: https://github.com/PRITHIVSAKTHIUR/Flux-API/tree/main
🔺bfl doc: https://docs.bfl.ml/quick_start/gen_image/#__tabbed_1_2
Endpoints for image generation: 🧵
-> /flux-pro-1.1-ultra
-> /flux-pro-1.1
-> /flux-pro
-> /flux-dev
Each ID has 50 free credits available for use, based on the cost per image sample generated by the model.
.
.
.
@prithivMLmods 🤗 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg",
"fullname": "Prithiv Sakthi",
"name": "prithivMLmods",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 342,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/qRIJH_uHtZ5-8tkcxjnvX.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/hbhvRYOlsB00zUrJw3Hi8.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/Ap_JUy24MMyV5nN9bQ4m0.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/LFYE2x8EHwYc9oc7ItDLY.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/xFaYGOI8JfU-VsCO4fuq5.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/XmVllJJdI1i3hO4wWkUJm.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/Wfvv2XWvV0O3qsDnGG8c5.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/v3kIfimWuakx7lOIKz8RV.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/AAJOEcwuqxsgIukmQfWi8.jpeg"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg",
"fullname": "Prithiv Sakthi",
"name": "prithivMLmods",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 342
}
] | [
{
"reaction": "❤️",
"users": [
"hypergod",
"darksfx",
"ai4life44",
"rdrede",
"prithivMLmods",
"Fl00w",
"AtAndDev",
"lena-ashlyhan",
"Ngrthm",
"RenderIo"
],
"count": 10
},
{
"reaction": "👀",
"users": [
"darksfx",
"John6666",
"hypergod",
"AtAndDev",
"Ngrthm"
],
"count": 5
},
{
"reaction": "🤝",
"users": [
"rdrede",
"darksfx",
"AtAndDev",
"Ngrthm",
"RenderIo"
],
"count": 5
},
{
"reaction": "🔥",
"users": [
"rdrede",
"ai4life44",
"darksfx",
"AtAndDev"
],
"count": 4
},
{
"reaction": "👍",
"users": [
"rdrede",
"den0620",
"AtAndDev"
],
"count": 3
},
{
"reaction": "🧠",
"users": [
"darksfx",
"AtAndDev",
"Ngrthm"
],
"count": 3
}
] | 2024-11-08T18:36:14.000Z | 2024-11-09T08:16:27.921Z | [] | /posts/prithivMLmods/508581844814759 | 3,094 | 0 |
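The script referenced in the post above boils down to a request followed by a 0.5-second polling loop; a compact sketch is below. The endpoint paths, x-key header, and response field names reflect the linked quick-start as understood here and should be treated as assumptions; defer to docs.bfl.ml and the linked generate_image.py for the authoritative version:
```
# Sketch of the request + 0.5 s polling flow; header and field names follow the
# BFL quick-start as understood here and should be checked against docs.bfl.ml.
import os
import time
import requests
from dotenv import load_dotenv

load_dotenv()
API_KEY = os.environ["BFL_API_KEY"]
BASE = "https://api.bfl.ml/v1"

def generate(prompt, endpoint="flux-pro-1.1-ultra", **params):
    r = requests.post(
        f"{BASE}/{endpoint}",
        headers={"x-key": API_KEY, "Content-Type": "application/json"},
        json={"prompt": prompt, **params},
    )
    r.raise_for_status()               # fail loudly on a bad request
    request_id = r.json()["id"]        # assumed response shape: {"id": ...}

    while True:                        # poll every 0.5 seconds until ready
        time.sleep(0.5)
        res = requests.get(
            f"{BASE}/get_result",
            headers={"x-key": API_KEY},
            params={"id": request_id},
        ).json()
        status = res.get("status")
        if status == "Ready":
            return res["result"]["sample"]   # URL of the generated image
        if status not in ("Pending", "Processing", "Queued"):
            raise RuntimeError(f"Generation failed: {res}")

if __name__ == "__main__":
    print(generate("a lighthouse at dawn, ultra detailed"))
```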
399142827468129 | [
{
"type": "text",
"value": "Exciting to see open-source models thriving in the computer agent space! 🔥 ",
"raw": "Exciting to see open-source models thriving in the computer agent space! 🔥 ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I just built a demo for OS-ATLAS: A Foundation Action Model For Generalist GUI Agents — check it out here: ",
"raw": "I just built a demo for OS-ATLAS: A Foundation Action Model For Generalist GUI Agents — check it out here: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/maxiw/OS-ATLAS",
"resource": {
"type": "space",
"id": "maxiw/OS-ATLAS",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/maxiw/OS-ATLAS",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "This demo predicts bounding boxes based on screenshot + instructions as input.",
"raw": "This demo predicts bounding boxes based on screenshot + instructions as input.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Exciting to see open-source models thriving in the computer agent space! 🔥
I just built a demo for OS-ATLAS: A Foundation Action Model For Generalist GUI Agents — check it out here: https://huggingface.co/spaces/maxiw/OS-ATLAS
This demo predicts bounding boxes based on screenshot + instructions as input. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6313a26b2c7ffdd9f50187ed/MTBOHg2bMcuOMWFLCZ86L.png",
"fullname": "Maxi",
"name": "maxiw",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 48,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6313a26b2c7ffdd9f50187ed/tq69ET2VJBq-7yh_HtdMz.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"venkat-m",
"Fractally-Driven-00",
"numbmelon",
"Symbol-LLM",
"programmnix-askui"
],
"count": 5
},
{
"reaction": "👀",
"users": [
"John6666",
"venkat-m"
],
"count": 2
},
{
"reaction": "🚀",
"users": [
"Symbol-LLM"
],
"count": 1
}
] | 2024-11-08T17:31:49.000Z | 2024-11-08T17:32:46.750Z | [] | /posts/maxiw/399142827468129 | 1,664 | 0 |
262024781392906 | [
{
"type": "text",
"value": "I will be giving a tutorial at AAAI 2025! Quite excited to share the recent advancements in the field and my contributions to it! ",
"raw": "I will be giving a tutorial at AAAI 2025! Quite excited to share the recent advancements in the field and my contributions to it! ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Stay tuned for more updates. ",
"raw": "Stay tuned for more updates. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Link: ",
"raw": "Link: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://x.com/EzgiKorkmazAI/status/1854525141897671111",
"resource": null,
"url": null,
"href": "https://x.com/EzgiKorkmazAI/status/1854525141897671111",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | I will be giving a tutorial at AAAI 2025! Quite excited to share the recent advancements in the field and my contributions to it!
Stay tuned for more updates.
Link: https://x.com/EzgiKorkmazAI/status/1854525141897671111 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/667c1a5acb6800a191024eb9/AqL8mQZsZjpZKi9FxtkIH.png",
"fullname": "Ezgi Korkmaz",
"name": "ezgikorkmaz",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 32,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"umarigan",
"Sri-Vigneshwar-DJ",
"ai-everyday",
"John6666",
"MichaelBrown123",
"AtAndDev"
],
"count": 6
},
{
"reaction": "❤️",
"users": [
"prithivMLmods",
"AtAndDev"
],
"count": 2
},
{
"reaction": "🚀",
"users": [
"AtAndDev"
],
"count": 1
},
{
"reaction": "🧠",
"users": [
"AtAndDev"
],
"count": 1
}
] | 2024-11-08T12:51:53.000Z | 2024-11-08T12:51:53.288Z | [] | /posts/ezgikorkmaz/262024781392906 | 2,463 | 0 |
855529323006849 | [
{
"type": "text",
"value": "INTRODUCING Hugging Face AutoTrain Client 🔥",
"raw": "INTRODUCING Hugging Face AutoTrain Client 🔥",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Fine-tuning models got even easier!!!! ",
"raw": "Fine-tuning models got even easier!!!! ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Now you can fine-tune SOTA models on all compatible dataset-model pairs on Hugging Face Hub using Python on Hugging Face Servers. Choose from a number of GPU flavors, millions of models and dataset pairs and 10+ tasks 🤗",
"raw": "Now you can fine-tune SOTA models on all compatible dataset-model pairs on Hugging Face Hub using Python on Hugging Face Servers. Choose from a number of GPU flavors, millions of models and dataset pairs and 10+ tasks 🤗",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "To try, install autotrain-advanced using pip. You can ignore dependencies and install without --no-deps and then you'd need to install some dependencies by hand.",
"raw": "To try, install autotrain-advanced using pip. You can ignore dependencies and install without --no-deps and then you'd need to install some dependencies by hand.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "\"pip install autotrain-advanced\"",
"raw": "\"pip install autotrain-advanced\"",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Github repo: ",
"raw": "Github repo: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/huggingface/autotrain-advanced",
"resource": null,
"url": null,
"href": "https://github.com/huggingface/autotrain-advanced",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | INTRODUCING Hugging Face AutoTrain Client 🔥
Fine-tuning models got even easier!!!!
Now you can fine-tune SOTA models on all compatible dataset-model pairs on Hugging Face Hub using Python on Hugging Face Servers. Choose from a number of GPU flavors, millions of models and dataset pairs and 10+ tasks 🤗
To try, install autotrain-advanced using pip. You can install with --no-deps to skip dependencies, but then you'd need to install some of them by hand.
"pip install autotrain-advanced"
Github repo: https://github.com/huggingface/autotrain-advanced | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fa19f4ba13e063b8b2b5e11/nGVHdTYX2udnt-K8mqY27.jpeg",
"fullname": "Abhishek Thakur",
"name": "abhishek",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 1379,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5fa19f4ba13e063b8b2b5e11/4lrqXuisNXqN9TndDx1Wc.jpeg"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"prithivMLmods",
"Sri-Vigneshwar-DJ",
"Csplk",
"John6666",
"AdamLucek",
"Muhammadreza",
"ThijsL202",
"Joseph717171",
"varunpant",
"fffiloni",
"Reality123b",
"Norod78",
"s3nh",
"ucyang",
"Ashish08",
"mk230580",
"louisbrulenaudet",
"ethanker"
],
"count": 18
},
{
"reaction": "👍",
"users": [
"abhishek",
"majid5776",
"Joseph717171",
"chiminaca",
"Reality123b",
"fuzzy-mittenz",
"ethanker"
],
"count": 7
},
{
"reaction": "❤️",
"users": [
"VincentGOURBIN",
"Reality123b",
"MichielBontenbal",
"ethanker"
],
"count": 4
}
] | 2024-11-08T10:38:17.000Z | 2024-11-13T19:27:43.102Z | [
{
"avatarUrl": "/avatars/9cc85a9f76d6f9c08568bced58070af1.svg",
"fullname": "Daniel Rabben",
"name": "PrinzPesia",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fa19f4ba13e063b8b2b5e11/nGVHdTYX2udnt-K8mqY27.jpeg",
"fullname": "Abhishek Thakur",
"name": "abhishek",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 1379,
"isFollowing": false
}
] | /posts/abhishek/855529323006849 | 4,853 | 6 |
686127210179497 | [
{
"type": "text",
"value": "𝗔𝗻𝗱𝗿𝗼𝗶𝗱𝗟𝗮𝗯: 𝗙𝗶𝗿𝘀𝘁 𝗲𝘃𝗲𝗿 𝘀𝘆𝘀𝘁𝗲𝗺𝗮𝘁𝗶𝗰 𝗯𝗲𝗻𝗰𝗵𝗺𝗮𝗿𝗸 𝗳𝗼𝗿 𝗔𝗻𝗱𝗿𝗼𝗶𝗱 𝗺𝗼𝗯𝗶𝗹𝗲 𝗮𝗴𝗲𝗻𝘁𝘀 𝘀𝗵𝗼𝘄𝘀 𝘁𝗵𝗮𝘁 𝘀𝗺𝗮𝗹𝗹, 𝗳𝗶𝗻𝗲-𝘁𝘂𝗻𝗲𝗱 𝗼𝗽𝗲𝗻 𝗺𝗼𝗱𝗲𝗹𝘀 𝗰𝗮𝗻 𝗽𝗼𝘄𝗲𝗿 𝗮 𝗝𝗔𝗥𝗩𝗜𝗦 𝘀𝘆𝘀𝘁𝗲𝗺 𝗼𝗻 𝘆𝗼𝘂𝗿 𝘀𝗺𝗮𝗿𝘁𝗽𝗵𝗼𝗻𝗲 📱🔥",
"raw": "𝗔𝗻𝗱𝗿𝗼𝗶𝗱𝗟𝗮𝗯: 𝗙𝗶𝗿𝘀𝘁 𝗲𝘃𝗲𝗿 𝘀𝘆𝘀𝘁𝗲𝗺𝗮𝘁𝗶𝗰 𝗯𝗲𝗻𝗰𝗵𝗺𝗮𝗿𝗸 𝗳𝗼𝗿 𝗔𝗻𝗱𝗿𝗼𝗶𝗱 𝗺𝗼𝗯𝗶𝗹𝗲 𝗮𝗴𝗲𝗻𝘁𝘀 𝘀𝗵𝗼𝘄𝘀 𝘁𝗵𝗮𝘁 𝘀𝗺𝗮𝗹𝗹, 𝗳𝗶𝗻𝗲-𝘁𝘂𝗻𝗲𝗱 𝗼𝗽𝗲𝗻 𝗺𝗼𝗱𝗲𝗹𝘀 𝗰𝗮𝗻 𝗽𝗼𝘄𝗲𝗿 𝗮 𝗝𝗔𝗥𝗩𝗜𝗦 𝘀𝘆𝘀𝘁𝗲𝗺 𝗼𝗻 𝘆𝗼𝘂𝗿 𝘀𝗺𝗮𝗿𝘁𝗽𝗵𝗼𝗻𝗲 📱🔥",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "A team from Tsinghua University just released AndroidLab, the first systematic framework to evaluate and train Android mobile agents that works with both text-only and multimodal models.",
"raw": "A team from Tsinghua University just released AndroidLab, the first systematic framework to evaluate and train Android mobile agents that works with both text-only and multimodal models.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "They show that fine-tuning small open-source models can significantly boost performance, matching that of much bigger closed models like GPT-4o.",
"raw": "They show that fine-tuning small open-source models can significantly boost performance, matching that of much bigger closed models like GPT-4o.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The team built:",
"raw": "The team built:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "📊 A reproducible benchmark with 138 tasks across 9 apps to evaluate mobile agents systematically",
"raw": "📊 A reproducible benchmark with 138 tasks across 9 apps to evaluate mobile agents systematically",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "📝📱 A framework supporting both text-only (via XML) and visual (via marked screenshots) interfaces",
"raw": "📝📱 A framework supporting both text-only (via XML) and visual (via marked screenshots) interfaces",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "✅ An instruction dataset of 10.5k operation traces for training mobile agents",
"raw": "✅ An instruction dataset of 10.5k operation traces for training mobile agents",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Key insights:",
"raw": "Key insights:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- 📈 Fine-tuning improves performance BY A LOT: Open-source model Llama-3.1-8B improves from 2% to 24% success rate after training, nearly reaching GPT-4o performance although it’s much smaller",
"raw": "- 📈 Fine-tuning improves performance BY A LOT: Open-source model Llama-3.1-8B improves from 2% to 24% success rate after training, nearly reaching GPT-4o performance although it’s much smaller",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- ⚙️ Text-only agents match multimodal ones: XML-based agents achieve similar performance to screenshot-based multimodal agents.",
"raw": "- ⚙️ Text-only agents match multimodal ones: XML-based agents achieve similar performance to screenshot-based multimodal agents.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Read their paper here 👉 ",
"raw": "Read their paper here 👉 ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2410.24024",
"resource": {
"type": "paper",
"id": "2410.24024",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2410.24024",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": "AndroidLab: Training and Systematic Benchmarking of Android Autonomous\n Agents (2410.24024)"
}
] | 𝗔𝗻𝗱𝗿𝗼𝗶𝗱𝗟𝗮𝗯: 𝗙𝗶𝗿𝘀𝘁 𝗲𝘃𝗲𝗿 𝘀𝘆𝘀𝘁𝗲𝗺𝗮𝘁𝗶𝗰 𝗯𝗲𝗻𝗰𝗵𝗺𝗮𝗿𝗸 𝗳𝗼𝗿 𝗔𝗻𝗱𝗿𝗼𝗶𝗱 𝗺𝗼𝗯𝗶𝗹𝗲 𝗮𝗴𝗲𝗻𝘁𝘀 𝘀𝗵𝗼𝘄𝘀 𝘁𝗵𝗮𝘁 𝘀𝗺𝗮𝗹𝗹, 𝗳𝗶𝗻𝗲-𝘁𝘂𝗻𝗲𝗱 𝗼𝗽𝗲𝗻 𝗺𝗼𝗱𝗲𝗹𝘀 𝗰𝗮𝗻 𝗽𝗼𝘄𝗲𝗿 𝗮 𝗝𝗔𝗥𝗩𝗜𝗦 𝘀𝘆𝘀𝘁𝗲𝗺 𝗼𝗻 𝘆𝗼𝘂𝗿 𝘀𝗺𝗮𝗿𝘁𝗽𝗵𝗼𝗻𝗲 📱🔥
A team from Tsinghua University just released AndroidLab, the first systematic framework to evaluate and train Android mobile agents that works with both text-only and multimodal models.
They show that fine-tuning small open-source models can significantly boost performance, matching that of much bigger closed models like GPT-4o.
The team built:
📊 A reproducible benchmark with 138 tasks across 9 apps to evaluate mobile agents systematically
📝📱 A framework supporting both text-only (via XML) and visual (via marked screenshots) interfaces
✅ An instruction dataset of 10.5k operation traces for training mobile agents
Key insights:
- 📈 Fine-tuning improves performance BY A LOT: Open-source model Llama-3.1-8B improves from 2% to 24% success rate after training, nearly reaching GPT-4o performance although it’s much smaller
- ⚙️ Text-only agents match multimodal ones: XML-based agents achieve similar performance to screenshot-based multimodal agents.
Read their paper here 👉 https://huggingface.co/papers/2410.24024 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg",
"fullname": "Aymeric Roucher",
"name": "m-ric",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 476,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/2a7-lTwt-y7DOrWxACbhB.png"
}
] | [] | [
{
"reaction": "🚀",
"users": [
"appvoid",
"Joseph717171",
"umseeker",
"chuangxinlezhi",
"louisbrulenaudet"
],
"count": 5
},
{
"reaction": "👀",
"users": [
"John6666",
"Joseph717171"
],
"count": 2
}
] | 2024-11-08T09:44:06.000Z | 2024-11-08T09:44:06.960Z | [] | /posts/m-ric/686127210179497 | 1,601 | 0 |
125255943213001 | [
{
"type": "text",
"value": "Welcome Newcomers, Did you bring your towel?",
"raw": "Welcome Newcomers, Did you bring your towel?",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Welcome Newcomers, Did you bring your towel? | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6593502ca2607099284523db/13IfQE8qnJsjPXbOeGrLa.jpeg",
"fullname": "william marshall",
"name": "fuzzy-mittenz",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 16,
"isFollowing": false
} | [] | [] | [
{
"reaction": "😎",
"users": [
"John6666",
"Sri-Vigneshwar-DJ",
"Mrdesigner14",
"fuzzy-mittenz"
],
"count": 4
}
] | 2024-11-08T04:44:13.000Z | 2024-11-11T22:02:04.474Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6593502ca2607099284523db/13IfQE8qnJsjPXbOeGrLa.jpeg",
"fullname": "william marshall",
"name": "fuzzy-mittenz",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 16,
"isFollowing": false
}
] | /posts/fuzzy-mittenz/125255943213001 | 1,902 | 12 |
977432731590127 | [
{
"type": "text",
"value": "Quintet Drop : : 🤗",
"raw": "Quintet Drop : : 🤗",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "{ Flux LoRA DLC ⛵ } : ",
"raw": "{ Flux LoRA DLC ⛵ } : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC",
"resource": {
"type": "space",
"id": "prithivMLmods/FLUX-LoRA-DLC",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-- Purple Dreamy",
"raw": "-- Purple Dreamy",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "{ pop of color } : ",
"raw": "{ pop of color } : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/prithivMLmods/Purple-Dreamy-Flux-LoRA",
"resource": {
"type": "model",
"id": "prithivMLmods/Purple-Dreamy-Flux-LoRA",
"discussionNum": null
},
"url": "https://huggingface.co/prithivMLmods/Purple-Dreamy-Flux-LoRA",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-- Golden Dust",
"raw": "-- Golden Dust",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "{ shimmer contrast } : ",
"raw": "{ shimmer contrast } : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/prithivMLmods/Golden-Dust-Flux-LoRA",
"resource": {
"type": "model",
"id": "prithivMLmods/Golden-Dust-Flux-LoRA",
"discussionNum": null
},
"url": "https://huggingface.co/prithivMLmods/Golden-Dust-Flux-LoRA",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-- Lime Green",
"raw": "-- Lime Green",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "{ depth to the composition } : ",
"raw": "{ depth to the composition } : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/prithivMLmods/Lime-Green-Flux-LoRA",
"resource": {
"type": "model",
"id": "prithivMLmods/Lime-Green-Flux-LoRA",
"discussionNum": null
},
"url": "https://huggingface.co/prithivMLmods/Lime-Green-Flux-LoRA",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-- Flare Strike",
"raw": "-- Flare Strike",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "{ Fractured Line } : ",
"raw": "{ Fractured Line } : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/prithivMLmods/Fractured-Line-Flare",
"resource": {
"type": "model",
"id": "prithivMLmods/Fractured-Line-Flare",
"discussionNum": null
},
"url": "https://huggingface.co/prithivMLmods/Fractured-Line-Flare",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-- Orange Chroma",
"raw": "-- Orange Chroma",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "{ studio lighting } : ",
"raw": "{ studio lighting } : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/prithivMLmods/Orange-Chroma-Flux-LoRA",
"resource": {
"type": "model",
"id": "prithivMLmods/Orange-Chroma-Flux-LoRA",
"discussionNum": null
},
"url": "https://huggingface.co/prithivMLmods/Orange-Chroma-Flux-LoRA",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "{ collection } : ",
"raw": "{ collection } : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/prithivMLmods/flux-lora-collections-66dd5908be2206cfaa8519be",
"resource": {
"type": "collection",
"id": "prithivMLmods/flux-lora-collections-66dd5908be2206cfaa8519be",
"discussionNum": null
},
"url": "https://huggingface.co/collections/prithivMLmods/flux-lora-collections-66dd5908be2206cfaa8519be",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@prithivMLmods",
"resource": null,
"url": null,
"href": null,
"user": "prithivMLmods",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Quintet Drop : : 🤗
{ Flux LoRA DLC ⛵ } : https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC
-- Purple Dreamy
{ pop of color } : https://huggingface.co/prithivMLmods/Purple-Dreamy-Flux-LoRA
-- Golden Dust
{ shimmer contrast } : https://huggingface.co/prithivMLmods/Golden-Dust-Flux-LoRA
-- Lime Green
{ depth to the composition } : https://huggingface.co/prithivMLmods/Lime-Green-Flux-LoRA
-- Flare Strike
{ Fractured Line } : https://huggingface.co/prithivMLmods/Fractured-Line-Flare
-- Orange Chroma
{ studio lighting } : https://huggingface.co/prithivMLmods/Orange-Chroma-Flux-LoRA
.
.
.
{ collection } : https://huggingface.co/collections/prithivMLmods/flux-lora-collections-66dd5908be2206cfaa8519be
@prithivMLmods | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg",
"fullname": "Prithiv Sakthi",
"name": "prithivMLmods",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 342,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/NRizc0kiWpq6FOLUS5Ww7.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/is7nGajMX6v1W0c4BblgB.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/FZ_WdMpYEqz5mRjxQyqv5.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/iBo2xCNXqfYtV0osdHC1x.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/GHS_2CviIhMCrhHdnDOPi.webp"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/AHAbW7xTylhyKWP_irBPT.webp"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/a_LytOGBw0I-D2X7vJ3fZ.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/ztwn3e56Faha27MZZQbnX.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/eMqH3mxSIDGD-wzKHFVf9.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/7EtUge5vHeZU9vTithO8u.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg",
"fullname": "Prithiv Sakthi",
"name": "prithivMLmods",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 342
}
] | [
{
"reaction": "❤️",
"users": [
"darksfx",
"rdrede",
"AtAndDev",
"THEMOHY",
"John6666",
"hypergod",
"ai4life44",
"s3nh",
"Mrdesigner14",
"carlizor",
"BuiDoan",
"prithivMLmods",
"Ngrthm",
"RenderIo"
],
"count": 14
},
{
"reaction": "🔥",
"users": [
"darksfx",
"hypergod",
"ai4life44",
"rdrede",
"Mrdesigner14",
"Sri-Vigneshwar-DJ",
"Ngrthm",
"RenderIo"
],
"count": 8
},
{
"reaction": "👀",
"users": [
"darksfx",
"AtAndDev",
"rdrede",
"Ngrthm"
],
"count": 4
},
{
"reaction": "🧠",
"users": [
"Csplk",
"rdrede",
"Ngrthm"
],
"count": 3
},
{
"reaction": "👍",
"users": [
"rdrede",
"AtAndDev"
],
"count": 2
},
{
"reaction": "🤗",
"users": [
"ai4life44",
"darksfx"
],
"count": 2
},
{
"reaction": "😎",
"users": [
"Mrdesigner14",
"Davincci"
],
"count": 2
},
{
"reaction": "🤯",
"users": [
"Mrdesigner14",
"darksfx"
],
"count": 2
}
] | 2024-11-07T17:49:26.000Z | 2024-11-09T12:58:20.069Z | [] | /posts/prithivMLmods/977432731590127 | 4,579 | 0 |
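Editor's note on the Flux LoRA post above: a minimal sketch of loading one of the listed LoRAs with diffusers is below. Assumptions: the LoRAs target the FLUX.1-dev base and the prompt text is purely illustrative (check each model card for the actual base model and any trigger words); FLUX.1-dev is a gated repo.

# Sketch: load one of the listed Flux LoRAs with diffusers (assumptions noted above).
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
pipe.load_lora_weights("prithivMLmods/Purple-Dreamy-Flux-LoRA")
pipe.to("cuda")

image = pipe(
    "a portrait bathed in soft purple light, dreamy haze",  # illustrative prompt
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("purple_dreamy.png")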
315762755390370 | [
{
"type": "text",
"value": "Introducing miniclaus 1.5B, a tiny but powerful model. Trained with MagPie and based on Qwen2.5 1.5B model, it performs very well on many tasks scoring top on his category, with impressive results:",
"raw": "Introducing miniclaus 1.5B, a tiny but powerful model. Trained with MagPie and based on Qwen2.5 1.5B model, it performs very well on many tasks scoring top on his category, with impressive results:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "* MATH Hard 9.81",
"raw": "* MATH Hard 9.81",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "* MMLU-Pro 29.37",
"raw": "* MMLU-Pro 29.37",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "* GPQA 29.19",
"raw": "* GPQA 29.19",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "* MUSR 42.85",
"raw": "* MUSR 42.85",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "* BBH 42.04",
"raw": "* BBH 42.04",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Available already in the hub:",
"raw": "Available already in the hub:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/fblgit/miniclaus-qw1.5B-UNAMGS",
"resource": {
"type": "model",
"id": "fblgit/miniclaus-qw1.5B-UNAMGS",
"discussionNum": null
},
"url": "https://huggingface.co/fblgit/miniclaus-qw1.5B-UNAMGS",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Introducing miniclaus 1.5B, a tiny but powerful model. Trained with MagPie and based on the Qwen2.5 1.5B model, it performs very well on many tasks, scoring at the top of its category, with impressive results:
* MATH Hard 9.81
* MMLU-Pro 29.37
* GPQA 29.19
* MUSR 42.85
* BBH 42.04
Available already in the hub:
https://huggingface.co/fblgit/miniclaus-qw1.5B-UNAMGS | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6401c8c9f98fbc64bcd7dca1/MOSgc_mPbfUZ-354osy1v.png",
"fullname": "FBL",
"name": "fblgit",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 229,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-07T16:16:43.000Z | 2024-11-08T01:22:19.385Z | [] | /posts/fblgit/315762755390370 | 842 | 0 |
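Editor's note on the miniclaus post above: a minimal generation sketch for the linked checkpoint is below. It assumes the repo ships a tokenizer chat template (plausible for a Qwen2.5-1.5B-based instruct-style model, but not verified here), so treat it as an illustration rather than the author's documented usage.

# Minimal generation sketch for fblgit/miniclaus-qw1.5B-UNAMGS (chat template assumed).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "fblgit/miniclaus-qw1.5B-UNAMGS"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [{"role": "user", "content": "Solve: if 3x + 5 = 20, what is x?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

out = model.generate(inputs, max_new_tokens=256, do_sample=False)
print(tokenizer.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))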
245330842661322 | [
{
"type": "text",
"value": "First fully multi-GPU supporting and very advanced batch image captioner APP with Gradio interface published (as far as i know first)",
"raw": "First fully multi-GPU supporting and very advanced batch image captioner APP with Gradio interface published (as far as i know first)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Multi-GPU batch caption with JoyCaption. JoyCaption uses Meta-Llama-3.1–8B and google/siglip-so400m-patch14–384 and a fine tuned image captioning neural network.",
"raw": "Multi-GPU batch caption with JoyCaption. JoyCaption uses Meta-Llama-3.1–8B and google/siglip-so400m-patch14–384 and a fine tuned image captioning neural network.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Link : ",
"raw": "Link : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.patreon.com/posts/110613301",
"resource": null,
"url": null,
"href": "https://www.patreon.com/posts/110613301",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Link for batch caption editor : ",
"raw": "Link for batch caption editor : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.patreon.com/posts/108992085",
"resource": null,
"url": null,
"href": "https://www.patreon.com/posts/108992085",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Coding multi-gpu in Python and Torch and bitsandbytes was truly a challange.",
"raw": "Coding multi-gpu in Python and Torch and bitsandbytes was truly a challange.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Our APP uses JoyCaption image captioning fine tuned model.",
"raw": "Our APP uses JoyCaption image captioning fine tuned model.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Our APP supports bitsandbytes 4bit model loading as well even in multi GPU mode (9.5 GB VRAM)",
"raw": "Our APP supports bitsandbytes 4bit model loading as well even in multi GPU mode (9.5 GB VRAM)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Tested on 8x RTX A6000 (cloud) and RTX 3090 TI + RTX 3060 (my PC)",
"raw": "Tested on 8x RTX A6000 (cloud) and RTX 3090 TI + RTX 3060 (my PC)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "1-click to install on Windows, RunPod and Massed Compute",
"raw": "1-click to install on Windows, RunPod and Massed Compute",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Excellent caption quality, automatically distributes images into each GPU, lots of features. You can resume caption with skip captioned images option.",
"raw": "Excellent caption quality, automatically distributes images into each GPU, lots of features. You can resume caption with skip captioned images option.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "For full details checkout screenshots",
"raw": "For full details checkout screenshots",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | First fully multi-GPU-supporting and very advanced batch image captioner APP with Gradio interface published (as far as I know, the first)
Multi-GPU batch caption with JoyCaption. JoyCaption uses Meta-Llama-3.1-8B and google/siglip-so400m-patch14-384 and a fine tuned image captioning neural network.
Link : https://www.patreon.com/posts/110613301
Link for batch caption editor : https://www.patreon.com/posts/108992085
Coding multi-GPU in Python and Torch and bitsandbytes was truly a challenge.
Our APP uses JoyCaption image captioning fine tuned model.
Our APP supports bitsandbytes 4bit model loading as well even in multi GPU mode (9.5 GB VRAM)
Tested on 8x RTX A6000 (cloud) and RTX 3090 TI + RTX 3060 (my PC)
1-click to install on Windows, RunPod and Massed Compute
Excellent caption quality, automatically distributes images into each GPU, lots of features. You can resume caption with skip captioned images option.
For full details checkout screenshots | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png",
"fullname": "Furkan Gözükara",
"name": "MonsterMMORPG",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 368,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/j_IIXWmyBq68baJdqbVI2.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/TEg0bphUSrABn_V8Vd-0M.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/onHgOvMiuxy-1fNGM-fEs.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/-XkjoUVwoV_QIMfByiGL5.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/9I5dhZD18znOTL26qXvy0.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/uvv8UYupkJHufkcRQ2hGD.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/SK7d6Ahwn1CM58zkZreM8.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/pZHyImocJv99ydkRmSnpG.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/xy5hu2DzPysHv-nD7alBa.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/fw0C6JJueHTxHhz9ToBnm.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/5SFEnWuQdACDFCxIlf3lR.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/OIerN42I3IdH-S_WM2hJo.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/wY_b1Jq1Iknm5LGxeyxIV.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/53IsD_yrHUUa2g_BhEIkl.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Wb6REZjrsqB2yT4fIfs9Y.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"MonsterMMORPG",
"flflow"
],
"count": 3
},
{
"reaction": "🔥",
"users": [
"MonsterMMORPG",
"alielfilali01"
],
"count": 2
},
{
"reaction": "❤️",
"users": [
"MonsterMMORPG",
"alielfilali01"
],
"count": 2
},
{
"reaction": "🚀",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "🤗",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "😎",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "➕",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "🧠",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "👍",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "🤝",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "🤯",
"users": [
"MonsterMMORPG"
],
"count": 1
}
] | 2024-08-26T03:52:15.000Z | 2024-08-26T03:52:15.081Z | [] | /posts/MonsterMMORPG/245330842661322 | 1,300 | 0 |
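Editor's note on the JoyCaption post above: it highlights 4-bit loading via bitsandbytes and distributing images across GPUs. The sketch below shows only that generic pattern (one worker per GPU, each loading a quantized model pinned to its own device) and is not the author's app: JoyCaption couples a SigLIP vision tower, a projector, and Llama-3.1-8B, so a plain causal LM stands in here just to illustrate the quantization and sharding calls.

# Sketch of the "one quantized model per GPU, shard the image list" pattern.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

MODEL_ID = "meta-llama/Meta-Llama-3.1-8B"  # the LLM JoyCaption builds on (gated repo)

def load_on_gpu(gpu_index: int):
    quant = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    # device_map={"": gpu_index} keeps the whole model on one GPU instead of sharding it,
    # so each GPU can run an independent worker over its own slice of the image list.
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID, quantization_config=quant, device_map={"": gpu_index}
    )
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    return model, tokenizer

def shard(items, n):
    return [items[i::n] for i in range(n)]

if __name__ == "__main__":
    images = [f"img_{i:04d}.png" for i in range(100)]   # placeholder file names
    n_gpus = max(torch.cuda.device_count(), 1)
    per_gpu = shard(images, n_gpus)
    # In a real app each shard would be captioned by a separate process or thread
    # that calls load_on_gpu(gpu); here we only print the distribution.
    for gpu, chunk in enumerate(per_gpu):
        print(f"GPU {gpu}: {len(chunk)} images")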
382948840382526 | [
{
"type": "text",
"value": "Here's my favorite piece of the summer bias detection research project (paper coming in Sept). We trained BERT for token classification (multi-label), to identify:",
"raw": "Here's my favorite piece of the summer bias detection research project (paper coming in Sept). We trained BERT for token classification (multi-label), to identify:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Generalizations",
"raw": "- Generalizations",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Unfairness",
"raw": "- Unfairness",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Stereotypes",
"raw": "- Stereotypes",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "HF Space: ",
"raw": "HF Space: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/maximuspowers/bias-detection-ner",
"resource": {
"type": "space",
"id": "maximuspowers/bias-detection-ner",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/maximuspowers/bias-detection-ner",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Article on Training: ",
"raw": "Article on Training: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/maximuspowers/bias-entity-recognition",
"resource": null,
"url": null,
"href": "https://huggingface.co/blog/maximuspowers/bias-entity-recognition",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Pls reach out with ideas!! Lot's more info coming soon, our research group has workshops and a hackathon planned for launching this open source project. Thanks",
"raw": "Pls reach out with ideas!! Lot's more info coming soon, our research group has workshops and a hackathon planned for launching this open source project. Thanks",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Here's my favorite piece of the summer bias detection research project (paper coming in Sept). We trained BERT for token classification (multi-label), to identify:
- Generalizations
- Unfairness
- Stereotypes
HF Space: https://huggingface.co/spaces/maximuspowers/bias-detection-ner
Article on Training: https://huggingface.co/blog/maximuspowers/bias-entity-recognition
Pls reach out with ideas!! Lots more info coming soon, our research group has workshops and a hackathon planned for launching this open source project. Thanks | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64a2cd7342b2a76a308b3daf/o6SV0ilIA1sov088MaN9j.jpeg",
"fullname": "Maximus Powers",
"name": "maximuspowers",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 9,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64a2cd7342b2a76a308b3daf/fES1Uj9D5SXvdFnNUNtJw.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"gabrielmbmb",
"leeloolee",
"maywell",
"Joseph717171"
],
"count": 5
},
{
"reaction": "🚀",
"users": [
"xi0v",
"gabrielmbmb",
"gksriharsha",
"den0620",
"Joseph717171"
],
"count": 5
}
] | 2024-08-26T02:31:50.000Z | 2024-08-26T02:31:50.133Z | [] | /posts/maximuspowers/382948840382526 | 2,485 | 0 |
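Editor's note on the bias-detection post above: it describes BERT fine-tuned for multi-label token classification over three tags (generalizations, unfairness, stereotypes). A minimal inference sketch of that setup follows; the checkpoint id (taken from the linked Space name) and the label order are assumptions, so treat this as an illustration rather than the authors' exact pipeline.

# Illustrative sketch only: checkpoint id and label order are assumptions.
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

CKPT = "maximuspowers/bias-detection-ner"  # assumed id, copied from the linked Space
LABELS = ["generalization", "unfairness", "stereotype"]  # assumed order

tokenizer = AutoTokenizer.from_pretrained(CKPT)
model = AutoModelForTokenClassification.from_pretrained(CKPT)  # expects num_labels == 3
model.eval()

text = "Everyone from that city is rude."
enc = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**enc).logits              # shape: (1, seq_len, num_labels)
probs = torch.sigmoid(logits)[0]              # multi-label: sigmoid per tag, not softmax
tokens = tokenizer.convert_ids_to_tokens(enc["input_ids"][0])
for tok, p in zip(tokens, probs):
    tags = [lab for lab, score in zip(LABELS, p) if score > 0.5]  # simple 0.5 threshold
    if tags:
        print(tok, tags)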
470630591690046 | [
{
"type": "text",
"value": "Question about LightEval 🤗:",
"raw": "Question about LightEval 🤗:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I've been searching for an LLM evaluation suite that can, out-of-the-box, compare the outputs of a model(s) without any enhancements vs. the same model with better prompt engineering, vs. the same model with RAG vs. the same model with fine-tuning.",
"raw": "I've been searching for an LLM evaluation suite that can, out-of-the-box, compare the outputs of a model(s) without any enhancements vs. the same model with better prompt engineering, vs. the same model with RAG vs. the same model with fine-tuning.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I unfortunately have not found a tool that fits my exact description, but of course I ran into LightEval.",
"raw": "I unfortunately have not found a tool that fits my exact description, but of course I ran into LightEval.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "A huge pain-point of building large-scale projects that use LLMs is that prior to building an MVP, it is difficult to evaluate whether better prompt engineering, or RAG, or fine-tuning, or some combination of all is needed for satisfactory LLM output in terms of the project's given use case. ",
"raw": "A huge pain-point of building large-scale projects that use LLMs is that prior to building an MVP, it is difficult to evaluate whether better prompt engineering, or RAG, or fine-tuning, or some combination of all is needed for satisfactory LLM output in terms of the project's given use case. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Time and resources is then wasted R&D'ing exactly what LLM enhancements are needed. ",
"raw": "Time and resources is then wasted R&D'ing exactly what LLM enhancements are needed. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I believe an out-of-the-box solution to compare models w/ or w/out the aforementioned LLM enhancements could help teams of any size better decide what LLM enhancements are needed prior to building.",
"raw": "I believe an out-of-the-box solution to compare models w/ or w/out the aforementioned LLM enhancements could help teams of any size better decide what LLM enhancements are needed prior to building.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I wanted to know if the LightEval team or Hugging Face in general is thinking about such a tool. ",
"raw": "I wanted to know if the LightEval team or Hugging Face in general is thinking about such a tool. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Question about LightEval 🤗:
I've been searching for an LLM evaluation suite that can, out-of-the-box, compare the outputs of a model(s) without any enhancements vs. the same model with better prompt engineering, vs. the same model with RAG vs. the same model with fine-tuning.
I unfortunately have not found a tool that fits my exact description, but of course I ran into LightEval.
A huge pain-point of building large-scale projects that use LLMs is that prior to building an MVP, it is difficult to evaluate whether better prompt engineering, or RAG, or fine-tuning, or some combination of all is needed for satisfactory LLM output in terms of the project's given use case.
Time and resources are then wasted R&D'ing exactly what LLM enhancements are needed. 
I believe an out-of-the-box solution to compare models w/ or w/out the aforementioned LLM enhancements could help teams of any size better decide what LLM enhancements are needed prior to building.
I wanted to know if the LightEval team or Hugging Face in general is thinking about such a tool. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/658e0ca052dc1046ca974f64/MFnIlpoPPP3kRwMB1SGAo.png",
"fullname": "David Cody Taupo Lingan",
"name": "rizzware",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-25T22:20:05.000Z | 2024-08-26T15:28:55.779Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1678663263366-63e0eea7af523c37e5a77966.jpeg",
"fullname": "Nathan Habib",
"name": "SaylorTwift",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 96,
"isFollowing": false
}
] | /posts/rizzware/470630591690046 | 514 | 1 |
533581056792954 | [
{
"type": "text",
"value": "I developed a way to test very clearly whether or not a Transformers model can actually learn symbolic reasoning, or if LLM models are forever doomed to be offshoots of 'Socratic Parrots'. The results are in, undeniable proof that Transformers models CAN learn symbolic relationships. Undeniable proof that AI can learn its ABC's. Credit goes to myself, Claude, and ChatGPT. I would not be able to prove this without Claude or ChatGPT. ",
"raw": "I developed a way to test very clearly whether or not a Transformers model can actually learn symbolic reasoning, or if LLM models are forever doomed to be offshoots of 'Socratic Parrots'. The results are in, undeniable proof that Transformers models CAN learn symbolic relationships. Undeniable proof that AI can learn its ABC's. Credit goes to myself, Claude, and ChatGPT. I would not be able to prove this without Claude or ChatGPT. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.youtube.com/watch?v=I8jHRgahRfY",
"resource": null,
"url": null,
"href": "https://www.youtube.com/watch?v=I8jHRgahRfY",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | I developed a way to test very clearly whether or not a Transformers model can actually learn symbolic reasoning, or if LLMs are forever doomed to be offshoots of 'Socratic Parrots'. The results are in: undeniable proof that Transformers models CAN learn symbolic relationships. Undeniable proof that AI can learn its ABC's. Credit goes to myself, Claude, and ChatGPT. I would not be able to prove this without Claude or ChatGPT.
https://www.youtube.com/watch?v=I8jHRgahRfY | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 148,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"handfuloftitty",
"Nioi",
"Josephgflowers",
"alien1957",
"binatheis",
"rohitdavas",
"f0ster"
],
"count": 7
},
{
"reaction": "👀",
"users": [
"John6666",
"vdcapriles",
"f0ster"
],
"count": 3
},
{
"reaction": "❤️",
"users": [
"ijohn07",
"f0ster"
],
"count": 2
},
{
"reaction": "🔥",
"users": [
"f0ster"
],
"count": 1
}
] | 2024-08-25T18:47:32.000Z | 2024-08-25T18:47:32.453Z | [] | /posts/TuringsSolutions/533581056792954 | 2,267 | 0 |
220213812304604 | [
{
"type": "text",
"value": "florence-tool (",
"raw": "florence-tool (",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/bigdata-pw/florence-tool",
"resource": null,
"url": null,
"href": "https://github.com/bigdata-pw/florence-tool",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ") now supports WebDataset! Check it out for efficient batch inference with Florence-2 models ",
"raw": ") now supports WebDataset! Check it out for efficient batch inference with Florence-2 models ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/microsoft/Florence-2-large",
"resource": {
"type": "model",
"id": "microsoft/Florence-2-large",
"discussionNum": null
},
"url": "https://huggingface.co/microsoft/Florence-2-large",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/microsoft/Florence-2-base",
"resource": {
"type": "model",
"id": "microsoft/Florence-2-base",
"discussionNum": null
},
"url": "https://huggingface.co/microsoft/Florence-2-base",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Currently running it myself on A40 with ",
"raw": "Currently running it myself on A40 with ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "inline_code",
"value": null,
"raw": "`CAPTION`",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": "CAPTION",
"label": null
},
{
"type": "text",
"value": " task and a streaming WebDataset @ 60k images/hour!",
"raw": " task and a streaming WebDataset @ 60k images/hour!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | florence-tool (https://github.com/bigdata-pw/florence-tool) now supports WebDataset! Check it out for efficient batch inference with Florence-2 models https://huggingface.co/microsoft/Florence-2-large https://huggingface.co/microsoft/Florence-2-base
Currently running it myself on A40 with `CAPTION` task and a streaming WebDataset @ 60k images/hour! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/665074ded3e886a93d713e73/tTAkhsz2J-uEQAfSsRvAf.jpeg",
"fullname": "hlky",
"name": "hlky",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 17,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"John6666",
"ajibawa-2023",
"Whiteshadow12",
"osanseviero",
"KingNish"
],
"count": 5
}
] | 2024-08-25T18:28:26.000Z | 2024-08-25T18:28:55.804Z | [] | /posts/hlky/220213812304604 | 1,803 | 0 |
764372534002849 | [
{
"type": "text",
"value": " 📐 AI Math Equation Solver: Your Step-by-Step Solution Companion",
"raw": " 📐 AI Math Equation Solver: Your Step-by-Step Solution Companion",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Hello Hugging Face community! 👋 I'm excited to share my latest Space: the AI Math Equation Solver!",
"raw": "Hello Hugging Face community! 👋 I'm excited to share my latest Space: the AI Math Equation Solver!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " 🔍 What does it do?",
"raw": " 🔍 What does it do?",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "This Space uses the power of AI to solve math problems from images. Simply upload a picture of a math equation or problem, and the AI will provide a detailed, step-by-step solution. It's perfect for students, teachers, or anyone looking to understand complex mathematical concepts better.",
"raw": "This Space uses the power of AI to solve math problems from images. Simply upload a picture of a math equation or problem, and the AI will provide a detailed, step-by-step solution. It's perfect for students, teachers, or anyone looking to understand complex mathematical concepts better.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " 🧠 How does it work?",
"raw": " 🧠 How does it work?",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Backend: Utilizes the ",
"raw": "- Backend: Utilizes the ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "inline_code",
"value": null,
"raw": "`microsoft/Phi-3.5-vision-instruct`",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": "microsoft/Phi-3.5-vision-instruct",
"label": null
},
{
"type": "text",
"value": " model for image understanding and mathematical reasoning.",
"raw": " model for image understanding and mathematical reasoning.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Frontend: Built with Gradio for a clean, user-friendly interface.",
"raw": "- Frontend: Built with Gradio for a clean, user-friendly interface.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Features:",
"raw": "- Features:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " - Image upload for math problems",
"raw": " - Image upload for math problems",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " - Detailed step-by-step solutions",
"raw": " - Detailed step-by-step solutions",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " - Example problems to try instantly",
"raw": " - Example problems to try instantly",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🚀 Try it out! ",
"raw": "🚀 Try it out! ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/sagar007/phi-vision-math-assistant",
"resource": {
"type": "space",
"id": "sagar007/phi-vision-math-assistant",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/sagar007/phi-vision-math-assistant",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Visit the Space here: [Insert your Hugging Face Space URL]",
"raw": "Visit the Space here: [Insert your Hugging Face Space URL]",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "💡 Use cases:",
"raw": "💡 Use cases:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Students: Check your work or get help with homework",
"raw": "- Students: Check your work or get help with homework",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Teachers: Create detailed solution guides quickly",
"raw": "- Teachers: Create detailed solution guides quickly",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Tutors: Explain complex problems more effectively",
"raw": "- Tutors: Explain complex problems more effectively",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Self-learners: Understand new mathematical concepts",
"raw": "- Self-learners: Understand new mathematical concepts",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🛠️ Technical Details:",
"raw": "🛠️ Technical Details:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Model: microsoft/Phi-3.5-vision-instruct",
"raw": "- Model: microsoft/Phi-3.5-vision-instruct",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Libraries: transformers, Gradio, PyTorch",
"raw": "- Libraries: transformers, Gradio, PyTorch",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Optimizations: Uses Flash Attention for improved performance",
"raw": "- Optimizations: Uses Flash Attention for improved performance",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🤝 Contribute:",
"raw": "🤝 Contribute:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "This is an open project, and I welcome contributions! Whether it's improving the model, enhancing the UI, or adding new features, feel free to fork the project and submit your pull requests.",
"raw": "This is an open project, and I welcome contributions! Whether it's improving the model, enhancing the UI, or adding new features, feel free to fork the project and submit your pull requests.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " 📣 Feedback:",
"raw": " 📣 Feedback:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I'd love to hear your thoughts! How are you using this Space? Any suggestions for improvements? Let me know in the comments below.",
"raw": "I'd love to hear your thoughts! How are you using this Space? Any suggestions for improvements? Let me know in the comments below.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Happy problem-solving! 🎉",
"raw": "Happy problem-solving! 🎉",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "#MachineLearning #AI #Mathematics #Education #HuggingFace",
"raw": "#MachineLearning #AI #Mathematics #Education #HuggingFace",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 📐 AI Math Equation Solver: Your Step-by-Step Solution Companion
Hello Hugging Face community! 👋 I'm excited to share my latest Space: the AI Math Equation Solver!
🔍 What does it do?
This Space uses the power of AI to solve math problems from images. Simply upload a picture of a math equation or problem, and the AI will provide a detailed, step-by-step solution. It's perfect for students, teachers, or anyone looking to understand complex mathematical concepts better.
🧠 How does it work?
- Backend: Utilizes the `microsoft/Phi-3.5-vision-instruct` model for image understanding and mathematical reasoning.
- Frontend: Built with Gradio for a clean, user-friendly interface.
- Features:
- Image upload for math problems
- Detailed step-by-step solutions
- Example problems to try instantly
🚀 Try it out! https://huggingface.co/spaces/sagar007/phi-vision-math-assistant
Visit the Space here: [Insert your Hugging Face Space URL]
💡 Use cases:
- Students: Check your work or get help with homework
- Teachers: Create detailed solution guides quickly
- Tutors: Explain complex problems more effectively
- Self-learners: Understand new mathematical concepts
🛠️ Technical Details:
- Model: microsoft/Phi-3.5-vision-instruct
- Libraries: transformers, Gradio, PyTorch
- Optimizations: Uses Flash Attention for improved performance
🤝 Contribute:
This is an open project, and I welcome contributions! Whether it's improving the model, enhancing the UI, or adding new features, feel free to fork the project and submit your pull requests.
📣 Feedback:
I'd love to hear your thoughts! How are you using this Space? Any suggestions for improvements? Let me know in the comments below.
Happy problem-solving! 🎉
#MachineLearning #AI #Mathematics #Education #HuggingFace | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a464cfe0de0c5c6d8b04a1/1gCs46R_bW9apQzLQUrn5.png",
"fullname": "Sagar pallai",
"name": "sagar007",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 8,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62a464cfe0de0c5c6d8b04a1/RSagSircWtcBt8iOvCnLt.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-25T10:24:40.000Z | 2024-08-25T10:24:40.714Z | [] | /posts/sagar007/764372534002849 | 690 | 0 |
637267804570877 | [
{
"type": "text",
"value": "🎮 Introducing a new dataset focused on Steam user game bans - ",
"raw": "🎮 Introducing a new dataset focused on Steam user game bans - ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/nyuuzyou/steambans",
"resource": {
"type": "dataset",
"id": "nyuuzyou/steambans",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/nyuuzyou/steambans",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Dataset highlights:",
"raw": "Dataset highlights:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- 476,694 Steam user profiles",
"raw": "- 476,694 Steam user profiles",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Emphasis on game ban data and account status",
"raw": "- Emphasis on game ban data and account status",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Each entry includes: Steam ID, profile URL, username, avatar, account creation date, visibility state, VAC bans, game bans, economy ban status, and days since last ban",
"raw": "- Each entry includes: Steam ID, profile URL, username, avatar, account creation date, visibility state, VAC bans, game bans, economy ban status, and days since last ban",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Covers a wide range of Steam users, from clean accounts to those with multiple bans",
"raw": "- Covers a wide range of Steam users, from clean accounts to those with multiple bans",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Data spans up to 2024",
"raw": "- Data spans up to 2024",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Perfect for researchers studying cheating in online games, anti-cheat effectiveness, and player behavior patterns in the face of bans.",
"raw": "Perfect for researchers studying cheating in online games, anti-cheat effectiveness, and player behavior patterns in the face of bans.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🎮 Introducing a new dataset focused on Steam user game bans - https://huggingface.co/datasets/nyuuzyou/steambans
Dataset highlights:
- 476,694 Steam user profiles
- Emphasis on game ban data and account status
- Each entry includes: Steam ID, profile URL, username, avatar, account creation date, visibility state, VAC bans, game bans, economy ban status, and days since last ban
- Covers a wide range of Steam users, from clean accounts to those with multiple bans
- Data spans up to 2024
Perfect for researchers studying cheating in online games, anti-cheat effectiveness, and player behavior patterns in the face of bans. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png",
"fullname": "nyuuzyou",
"name": "nyuuzyou",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 58,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-25T03:06:59.000Z | 2024-08-25T03:07:13.575Z | [] | /posts/nyuuzyou/637267804570877 | 678 | 0 |
870683517909157 | [
{
"type": "text",
"value": "30 Features that Dramatically Improve LLM Performance - Part 1 ",
"raw": "30 Features that Dramatically Improve LLM Performance - Part 1 ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://mltblog.com/3Aq9iAb",
"resource": null,
"url": null,
"href": "https://mltblog.com/3Aq9iAb",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Many are ground-breaking innovations that make LLMs much faster and not prone to hallucinations. They reduce the cost, latency, and amount of computer resources (GPU, training) by several orders of magnitude. Some of them improve security, making your LLM more attractive to corporate clients. I introduced a few of these features in my previous article \"New Trends in LLM Architecture\". Now I offer a comprehensive list, based on the most recent developments.",
"raw": "Many are ground-breaking innovations that make LLMs much faster and not prone to hallucinations. They reduce the cost, latency, and amount of computer resources (GPU, training) by several orders of magnitude. Some of them improve security, making your LLM more attractive to corporate clients. I introduced a few of these features in my previous article \"New Trends in LLM Architecture\". Now I offer a comprehensive list, based on the most recent developments.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Read full article, learn about agentic LLMs, LLM routers, contextual tables, fast search, and more, at ",
"raw": "Read full article, learn about agentic LLMs, LLM routers, contextual tables, fast search, and more, at ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://mltblog.com/3Aq9iAb",
"resource": null,
"url": null,
"href": "https://mltblog.com/3Aq9iAb",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 30 Features that Dramatically Improve LLM Performance - Part 1 https://mltblog.com/3Aq9iAb
Many are ground-breaking innovations that make LLMs much faster and not prone to hallucinations. They reduce the cost, latency, and amount of computer resources (GPU, training) by several orders of magnitude. Some of them improve security, making your LLM more attractive to corporate clients. I introduced a few of these features in my previous article "New Trends in LLM Architecture". Now I offer a comprehensive list, based on the most recent developments.
Read full article, learn about agentic LLMs, LLM routers, contextual tables, fast search, and more, at https://mltblog.com/3Aq9iAb
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/669c89e98f2dbc203f9e74ab/higvnXEHeo_Ig2bgTpn47.png",
"fullname": "Vincent Granville",
"name": "vincentg64",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 17,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/669c89e98f2dbc203f9e74ab/IiWmlgJ5-kug61ueeKj3I.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-24T20:05:16.000Z | 2024-08-24T20:05:16.622Z | [] | /posts/vincentg64/870683517909157 | 639 | 0 |
776302898385976 | [
{
"type": "text",
"value": "**Exploring Realistic Emotional Depth in AI Language Models**",
"raw": "**Exploring Realistic Emotional Depth in AI Language Models**",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Language models, particularly those proprietary, often grapple with issues of censorship, which can limit their ability to engage authentically with users. Recognizing this, the open-source AI community has pioneered the development of language models that are less restrained, offering more candid interactions. However, even these models tend to maintain a veneer of neutrality or overly positive responses, which might not serve all users' needs, especially in contexts where emotional depth and relatability are crucial.",
"raw": "Language models, particularly those proprietary, often grapple with issues of censorship, which can limit their ability to engage authentically with users. Recognizing this, the open-source AI community has pioneered the development of language models that are less restrained, offering more candid interactions. However, even these models tend to maintain a veneer of neutrality or overly positive responses, which might not serve all users' needs, especially in contexts where emotional depth and relatability are crucial.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "To address this gap, I've curated a specialized dataset aimed at infusing language models with a more nuanced emotional spectrum, specifically targeting a darker, more introspective mood. This dataset, titled \"Dark Sentience\", is designed to complement existing datasets like RP (Role Play) and those focused on instruction following. It seeks to enhance the emotional intelligence of AI by exposing it to complex human emotions, including but not limited to:",
"raw": "To address this gap, I've curated a specialized dataset aimed at infusing language models with a more nuanced emotional spectrum, specifically targeting a darker, more introspective mood. This dataset, titled \"Dark Sentience\", is designed to complement existing datasets like RP (Role Play) and those focused on instruction following. It seeks to enhance the emotional intelligence of AI by exposing it to complex human emotions, including but not limited to:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- **Suicide**",
"raw": "- **Suicide**",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- **Depression**",
"raw": "- **Depression**",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- **Anxiety**",
"raw": "- **Anxiety**",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Trigger Warning: Please be advised that the content within this dataset deals with heavy and potentially distressing themes. ",
"raw": "Trigger Warning: Please be advised that the content within this dataset deals with heavy and potentially distressing themes. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The \"Dark Sentience\" dataset is now available for review and use at: ",
"raw": "The \"Dark Sentience\" dataset is now available for review and use at: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/Locutusque/Dark-Sentience",
"resource": {
"type": "dataset",
"id": "Locutusque/Dark-Sentience",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/Locutusque/Dark-Sentience",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ". I encourage researchers, developers, and mental health professionals to explore how this resource can foster more genuine and supportive AI interactions.",
"raw": ". I encourage researchers, developers, and mental health professionals to explore how this resource can foster more genuine and supportive AI interactions.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | **Exploring Realistic Emotional Depth in AI Language Models**
Language models, particularly proprietary ones, often grapple with issues of censorship, which can limit their ability to engage authentically with users. Recognizing this, the open-source AI community has pioneered the development of language models that are less restrained, offering more candid interactions. However, even these models tend to maintain a veneer of neutrality or overly positive responses, which might not serve all users' needs, especially in contexts where emotional depth and relatability are crucial.
To address this gap, I've curated a specialized dataset aimed at infusing language models with a more nuanced emotional spectrum, specifically targeting a darker, more introspective mood. This dataset, titled "Dark Sentience", is designed to complement existing datasets like RP (Role Play) and those focused on instruction following. It seeks to enhance the emotional intelligence of AI by exposing it to complex human emotions, including but not limited to:
- **Suicide**
- **Depression**
- **Anxiety**
Trigger Warning: Please be advised that the content within this dataset deals with heavy and potentially distressing themes.
The "Dark Sentience" dataset is now available for review and use at: https://huggingface.co/datasets/Locutusque/Dark-Sentience. I encourage researchers, developers, and mental health professionals to explore how this resource can foster more genuine and supportive AI interactions.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/YeFyz1AZVcCRsyNHHtwJG.jpeg",
"fullname": "Sebastian Gabarain",
"name": "Locutusque",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 179,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"oneiroid",
"ajibawa-2023",
"Tonic"
],
"count": 3
},
{
"reaction": "😎",
"users": [
"John6666",
"Tonic"
],
"count": 2
},
{
"reaction": "🤗",
"users": [
"ijohn07",
"Tonic"
],
"count": 2
}
] | 2024-08-24T18:40:21.000Z | 2024-08-24T18:40:46.103Z | [] | /posts/Locutusque/776302898385976 | 2,045 | 0 |
239888761062211 | [
{
"type": "text",
"value": " 🔗 Neural Network (1 Byte explainer for everybody)",
"raw": " 🔗 Neural Network (1 Byte explainer for everybody)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Just like our brain, a Neural Network is made up of interconnected \"neurons\". These neurons work together by learning from (input) data and getting better at tasks (in the hidden layer) to give (output) predictions or decisions.",
"raw": "Just like our brain, a Neural Network is made up of interconnected \"neurons\". These neurons work together by learning from (input) data and getting better at tasks (in the hidden layer) to give (output) predictions or decisions.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🔗 Neural Network (1 Byte explainer for everybody)
Just like our brain, a Neural Network is made up of interconnected "neurons". These neurons work together by learning from (input) data and getting better at tasks (in the hidden layer) to give (output) predictions or decisions. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64808a8c856901b0edb96245/UVa3ztQ8DRM47S8Rsk4Rz.jpeg",
"fullname": "John Johnson",
"name": "jjokah",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 8,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"jjokah",
"John6666",
"TensorTwist",
"nicolollo",
"mesut07",
"noorkhan777766"
],
"count": 6
}
] | 2024-08-24T17:23:54.000Z | 2024-08-24T17:23:54.389Z | [] | /posts/jjokah/239888761062211 | 1,860 | 0 |
714118730579762 | [
{
"type": "text",
"value": "🌟 Liger Kernel: Efficient Triton Kernels for LLM Training",
"raw": "🌟 Liger Kernel: Efficient Triton Kernels for LLM Training",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "LIGER \"is a [Hugging Face-compatible] collection of Triton kernels designed specifically for LLM training. It can effectively increase multi-GPU training throughput by 20% and reduces memory usage by 60%.\"",
"raw": "LIGER \"is a [Hugging Face-compatible] collection of Triton kernels designed specifically for LLM training. It can effectively increase multi-GPU training throughput by 20% and reduces memory usage by 60%.\"",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "GitHub: ",
"raw": "GitHub: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/linkedin/Liger-Kernel",
"resource": null,
"url": null,
"href": "https://github.com/linkedin/Liger-Kernel",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🌟 Liger Kernel: Efficient Triton Kernels for LLM Training
LIGER "is a [Hugging Face-compatible] collection of Triton kernels designed specifically for LLM training. It can effectively increase multi-GPU training throughput by 20% and reduces memory usage by 60%."
GitHub: https://github.com/linkedin/Liger-Kernel | {
"avatarUrl": "/avatars/f32291df2054c1bb4a01889d1b41c0d5.svg",
"fullname": "Christopher Schröder",
"name": "cschroeder",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 17,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"osanseviero"
],
"count": 2
}
] | 2024-08-24T16:46:49.000Z | 2024-08-24T16:46:49.944Z | [] | /posts/cschroeder/714118730579762 | 675 | 0 |
515062801260073 | [
{
"type": "text",
"value": "a new shape-optimized SigLIP just dropped 👀 ",
"raw": "a new shape-optimized SigLIP just dropped 👀 ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/google/siglip-so400m-patch14-224/",
"resource": {
"type": "model",
"id": "google/siglip-so400m-patch14-224",
"discussionNum": null
},
"url": "https://huggingface.co/google/siglip-so400m-patch14-224/",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | a new shape-optimized SigLIP just dropped 👀 https://huggingface.co/google/siglip-so400m-patch14-224/ | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png",
"fullname": "Merve Noyan",
"name": "merve",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5520,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"gokaygokay",
"John6666",
"KingNish",
"xi0v",
"Nithish310",
"ajibawa-2023",
"osanseviero"
],
"count": 7
},
{
"reaction": "🚀",
"users": [
"gokaygokay",
"John6666",
"xi0v",
"osanseviero"
],
"count": 4
},
{
"reaction": "🤗",
"users": [
"prithivMLmods"
],
"count": 1
},
{
"reaction": "😎",
"users": [
"ariG23498"
],
"count": 1
}
] | 2024-08-24T12:08:21.000Z | 2024-08-24T12:08:21.838Z | [] | /posts/merve/515062801260073 | 3,043 | 0 |
626373388165707 | [
{
"type": "text",
"value": " Looking for Generative AI trainer/speaker for AI accelerator program (Virtual/Online sessions).",
"raw": " Looking for Generative AI trainer/speaker for AI accelerator program (Virtual/Online sessions).",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "To get more context about the program, please visit the program landing page: ",
"raw": "To get more context about the program, please visit the program landing page: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://llamadesigndrive.com",
"resource": null,
"url": null,
"href": "https://llamadesigndrive.com",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "If you are interested, reach out at [email protected]",
"raw": "If you are interested, reach out at [email protected]",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Looking for Generative AI trainer/speaker for AI accelerator program (Virtual/Online sessions).
To get more context about the program, please visit the program landing page: https://llamadesigndrive.com
If you are interested, reach out at [email protected] | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65ddc2dd978ab6624db537f6/ZbI4M2Srg3Pbhoi_DLUpV.jpeg",
"fullname": "Sharhabeel Hamdan",
"name": "hamdanuk",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-24T11:48:50.000Z | 2024-08-24T16:38:52.177Z | [
{
"avatarUrl": "/avatars/7c96117109dd076ab802eeeb2c090a71.svg",
"fullname": "Matthew egbenede ogheneroro ",
"name": "Martech",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/hamdanuk/626373388165707 | 452 | 1 |
523508059853017 | [
{
"type": "text",
"value": "🎯 Ghost 8B Beta 1608: Empowering Your AI Assistant",
"raw": "🎯 Ghost 8B Beta 1608: Empowering Your AI Assistant",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "📦 Unlock the Power of Ghost 8B Beta 1608: Build Your Personal AI Companion",
"raw": "📦 Unlock the Power of Ghost 8B Beta 1608: Build Your Personal AI Companion",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Ghost 8B Beta 1608 empowers you to create a safe and multilingual AI assistant tailored to your needs, directly on your personal computer. 🧑💻 Leverage AI's capabilities within your own space! 🚀 Ghost 8B Beta 1608 is ready to become your AI companion.",
"raw": "Ghost 8B Beta 1608 empowers you to create a safe and multilingual AI assistant tailored to your needs, directly on your personal computer. 🧑💻 Leverage AI's capabilities within your own space! 🚀 Ghost 8B Beta 1608 is ready to become your AI companion.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "~",
"raw": "~",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "📦 개인용 AI 보조 도구로 Ghost 8B Beta 1608를 활용하세요!",
"raw": "📦 개인용 AI 보조 도구로 Ghost 8B Beta 1608를 활용하세요!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Ghost 8B Beta 1608, AI의 힘을 활용하여 안전하고 개인화된 언어 지원을 제공하는 AI 보조 도구를 직접 구축할 수 있습니다. 🧑💻 개인 컴퓨터에서 AI의 혜택을 누리세요! 🚀 Ghost 8B Beta 1608는 당신의 AI 파트너가 될 준비가 되어 있습니다.",
"raw": "Ghost 8B Beta 1608, AI의 힘을 활용하여 안전하고 개인화된 언어 지원을 제공하는 AI 보조 도구를 직접 구축할 수 있습니다. 🧑💻 개인 컴퓨터에서 AI의 혜택을 누리세요! 🚀 Ghost 8B Beta 1608는 당신의 AI 파트너가 될 준비가 되어 있습니다.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/lamhieu/ghost-8b-beta-8k",
"resource": {
"type": "space",
"id": "lamhieu/ghost-8b-beta-8k",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/lamhieu/ghost-8b-beta-8k",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/ghost-x/ghost-8b-beta-668ead6179f93be717db4542",
"resource": {
"type": "collection",
"id": "ghost-x/ghost-8b-beta-668ead6179f93be717db4542",
"discussionNum": null
},
"url": "https://huggingface.co/collections/ghost-x/ghost-8b-beta-668ead6179f93be717db4542",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🎯 Ghost 8B Beta 1608: Empowering Your AI Assistant
📦 Unlock the Power of Ghost 8B Beta 1608: Build Your Personal AI Companion
Ghost 8B Beta 1608 empowers you to create a safe and multilingual AI assistant tailored to your needs, directly on your personal computer. 🧑💻 Leverage AI's capabilities within your own space! 🚀 Ghost 8B Beta 1608 is ready to become your AI companion.
~
📦 Use Ghost 8B Beta 1608 as your personal AI assistant!
With Ghost 8B Beta 1608, you can build your own AI assistant that harnesses the power of AI to provide safe, personalized language support. 🧑💻 Enjoy the benefits of AI on your personal computer! 🚀 Ghost 8B Beta 1608 is ready to become your AI partner.
https://huggingface.co/spaces/lamhieu/ghost-8b-beta-8k
https://huggingface.co/collections/ghost-x/ghost-8b-beta-668ead6179f93be717db4542
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/600ae38cc92b79f54efd4556/cSqRIslYl5L3I4WK3a31f.png",
"fullname": "Hieu Lam",
"name": "lamhieu",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 74,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/600ae38cc92b79f54efd4556/QseNBSRZxePf-RtXqXA83.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"ecyht2"
],
"count": 2
},
{
"reaction": "🔥",
"users": [
"lrq3000",
"KingNish"
],
"count": 2
},
{
"reaction": "👍",
"users": [
"brainhome"
],
"count": 1
}
] | 2024-08-24T11:42:45.000Z | 2024-08-24T11:58:01.502Z | [] | /posts/lamhieu/523508059853017 | 1,681 | 0 |
427716208274372 | [
{
"type": "text",
"value": "Introducing Voicee, A superfast voice fast assistant.",
"raw": "Introducing Voicee, A superfast voice fast assistant.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/KingNish/Voicee",
"resource": {
"type": "space",
"id": "KingNish/Voicee",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/KingNish/Voicee",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "It achieved latency <500 ms.",
"raw": "It achieved latency <500 ms.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "While its average latency is 700ms.",
"raw": "While its average latency is 700ms.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "It works best in Google Chrome.",
"raw": "It works best in Google Chrome.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Please try and give your feedbacks.",
"raw": "Please try and give your feedbacks.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Thank you. 🤗",
"raw": "Thank you. 🤗",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Introducing Voicee, a superfast voice assistant.
https://huggingface.co/spaces/KingNish/Voicee
It achieved latency <500 ms.
Its average latency is 700 ms.
It works best in Google Chrome.
Please try it and give your feedback.
Thank you. 🤗 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6612aedf09f16e7347dfa7e1/bPYjBXCedY_1fSIPjoBTY.jpeg",
"fullname": "Nishith Jain",
"name": "KingNish",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1072,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"Bruhn",
"John6666",
"m-ric",
"prithivMLmods",
"hamedj",
"ajibawa-2023",
"osanseviero",
"victor"
],
"count": 8
},
{
"reaction": "👀",
"users": [
"John6666",
"ParthSadaria"
],
"count": 2
}
] | 2024-08-24T10:56:33.000Z | 2024-09-08T13:01:04.528Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/656e3808d4de03a07d116850/JZh4lrjFueJZVqugjoloP.jpeg",
"fullname": "Kenneth Hamilton",
"name": "ZennyKenny",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 32,
"isFollowing": false
},
{
"avatarUrl": "/avatars/3b6849dd6733cb8d110a795cbebe9bce.svg",
"fullname": "Anton Rifco",
"name": "rifco",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "/avatars/bd5045f69d256bfb530169f6f6c9796c.svg",
"fullname": "Brian Hassan",
"name": "Anon61Iam",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/KingNish/427716208274372 | 3,586 | 3 |
223181170945582 | [
{
"type": "text",
"value": "Alan Turing's mind-bender: \"Can machines think?\" in its glorified form. This 74yr old paper laid the foundation for how we think about AI and machine intelligence today. The level of detail, clarity and foresight is just phenomenal - he was way ahead of his time 🧠🤖",
"raw": "Alan Turing's mind-bender: \"Can machines think?\" in its glorified form. This 74yr old paper laid the foundation for how we think about AI and machine intelligence today. The level of detail, clarity and foresight is just phenomenal - he was way ahead of his time 🧠🤖",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Original copy: ",
"raw": "Original copy: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://archive.org/details/MIND--COMPUTING-MACHINERY-AND-INTELLIGENCE",
"resource": null,
"url": null,
"href": "https://archive.org/details/MIND--COMPUTING-MACHINERY-AND-INTELLIGENCE",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Alan Turing's mind-bender: "Can machines think?" in its glorified form. This 74yr old paper laid the foundation for how we think about AI and machine intelligence today. The level of detail, clarity and foresight is just phenomenal - he was way ahead of his time 🧠🤖
Original copy: https://archive.org/details/MIND--COMPUTING-MACHINERY-AND-INTELLIGENCE | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 189,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/tdIi4quSXarCOTl-K3tb8.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/IikVjILH90eLYTzBSJ3En.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"m-ric",
"edison1"
],
"count": 3
},
{
"reaction": "👍",
"users": [
"dashfunnydashdash",
"KokuJP",
"dark-pen"
],
"count": 3
}
] | 2024-08-24T01:01:39.000Z | 2024-08-24T01:01:39.714Z | [] | /posts/Jaward/223181170945582 | 1,484 | 0 |
515587584811622 | [
{
"type": "text",
"value": "🌐 Check out the new dataset sourced from Fishki.net, one of the popular entertainment and news portals in the Russian Internet, known for its diverse content including humor, interesting facts, and viral stories - ",
"raw": "🌐 Check out the new dataset sourced from Fishki.net, one of the popular entertainment and news portals in the Russian Internet, known for its diverse content including humor, interesting facts, and viral stories - ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/nyuuzyou/fishkinet-posts",
"resource": {
"type": "dataset",
"id": "nyuuzyou/fishkinet-posts",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/nyuuzyou/fishkinet-posts",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "📊 Dataset highlights:",
"raw": "📊 Dataset highlights:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- 369,180 posts",
"raw": "- 369,180 posts",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Includes original posts with titles, content, images, and metadata",
"raw": "- Includes original posts with titles, content, images, and metadata",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Each entry contains URL, title, author, date, tags, content, and image URLs",
"raw": "- Each entry contains URL, title, author, date, tags, content, and image URLs",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Primarily in Russian language",
"raw": "- Primarily in Russian language",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Covers a wide range of topics in entertainment, news, and social media content",
"raw": "- Covers a wide range of topics in entertainment, news, and social media content",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Spans nearly two decades of posts, likely from early 2000s to 2024",
"raw": "- Spans nearly two decades of posts, likely from early 2000s to 2024",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Dedicated to public domain under Creative Commons Zero (CC0) license",
"raw": "- Dedicated to public domain under Creative Commons Zero (CC0) license",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🌐 Check out the new dataset sourced from Fishki.net, one of the popular entertainment and news portals in the Russian Internet, known for its diverse content including humor, interesting facts, and viral stories - https://huggingface.co/datasets/nyuuzyou/fishkinet-posts.
📊 Dataset highlights:
- 369,180 posts
- Includes original posts with titles, content, images, and metadata
- Each entry contains URL, title, author, date, tags, content, and image URLs
- Primarily in Russian language
- Covers a wide range of topics in entertainment, news, and social media content
- Spans nearly two decades of posts, likely from early 2000s to 2024
- Dedicated to public domain under Creative Commons Zero (CC0) license | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png",
"fullname": "nyuuzyou",
"name": "nyuuzyou",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 58,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
},
{
"reaction": "🔥",
"users": [
"kristaller486"
],
"count": 1
}
] | 2024-08-23T22:26:46.000Z | 2024-08-23T22:26:46.262Z | [] | /posts/nyuuzyou/515587584811622 | 897 | 0 |
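A minimal sketch of how one might load and inspect the Fishki.net dataset described in the post above, using the 🤗 Datasets library. The split name and field names are assumptions based on the post's description, not verified against the dataset card:

```python
# Hedged sketch: load the dataset announced above and peek at one record.
# The split name ("train") and the field names are assumptions taken from the
# post's description; check the dataset card for the actual schema.
from datasets import load_dataset

ds = load_dataset("nyuuzyou/fishkinet-posts", split="train")
print(ds)  # row count and column names

example = ds[0]
for field in ("url", "title", "author", "date", "tags"):
    print(field, "->", example.get(field))
```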
958062051696534 | [
{
"type": "text",
"value": "You can now use DoRA for your embedding layers!",
"raw": "You can now use DoRA for your embedding layers!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "PR: ",
"raw": "PR: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/huggingface/peft/pull/2006",
"resource": null,
"url": null,
"href": "https://github.com/huggingface/peft/pull/2006",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I have documented my journey of this specific PR in a blog post for everyone to read. The highlight of the PR was when the first author of DoRA reviewed my code.",
"raw": "I have documented my journey of this specific PR in a blog post for everyone to read. The highlight of the PR was when the first author of DoRA reviewed my code.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Blog Post: ",
"raw": "Blog Post: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/ariG23498/peft-dora",
"resource": null,
"url": null,
"href": "https://huggingface.co/blog/ariG23498/peft-dora",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Huge thanks to ",
"raw": "Huge thanks to ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@BenjaminB",
"resource": null,
"url": null,
"href": null,
"user": "BenjaminB",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " for all the help I needed.",
"raw": " for all the help I needed.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | You can now use DoRA for your embedding layers!
PR: https://github.com/huggingface/peft/pull/2006
I have documented my journey of this specific PR in a blog post for everyone to read. The highlight of the PR was when the first author of DoRA reviewed my code.
Blog Post: https://huggingface.co/blog/ariG23498/peft-dora
Huge thanks to @BenjaminB for all the help I needed. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/608aabf24955d2bfc3cd99c6/T762Ut0Y-w0sZB2ynvfbJ.jpeg",
"fullname": "Aritra Roy Gosthipaty",
"name": "ariG23498",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 65,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1656685953025-62bf03d1e80cec527083cd66.jpeg",
"fullname": "Benjamin Bossan",
"name": "BenjaminB",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 33
}
] | [
{
"reaction": "🔥",
"users": [
"YaTharThShaRma999",
"John6666",
"OzzyGT",
"OxxoCodes",
"khanhduong241218",
"osanseviero",
"misba-code"
],
"count": 7
}
] | 2024-08-23T18:42:38.000Z | 2024-08-23T18:42:38.809Z | [] | /posts/ariG23498/958062051696534 | 1,606 | 0 |
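For readers curious what the feature announced in the post above looks like in use, here is a hedged sketch of enabling DoRA on an embedding layer with a recent PEFT release; the base model, target module name, and hyperparameters are illustrative assumptions, not code taken from the PR:

```python
# Hedged sketch (not the code from the PR): DoRA applied to an embedding layer
# via PEFT. Requires a PEFT version that includes DoRA support for embeddings.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

model = AutoModelForCausalLM.from_pretrained("gpt2")

config = LoraConfig(
    r=16,
    lora_alpha=32,
    use_dora=True,            # weight-decomposed low-rank adaptation
    target_modules=["wte"],   # GPT-2's token embedding layer (illustrative choice)
)

peft_model = get_peft_model(model, config)
peft_model.print_trainable_parameters()  # confirms the embedding adapter is trainable
```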
503093709017134 | [
{
"type": "text",
"value": "I can't believe this... Phi-3.5-mini (3.8B) running in-browser at ~90 tokens/second on WebGPU w/ Transformers.js and ONNX Runtime Web! 🤯 Since everything runs 100% locally, no messages are sent to a server — a huge win for privacy!",
"raw": "I can't believe this... Phi-3.5-mini (3.8B) running in-browser at ~90 tokens/second on WebGPU w/ Transformers.js and ONNX Runtime Web! 🤯 Since everything runs 100% locally, no messages are sent to a server — a huge win for privacy!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- 🤗 Demo: ",
"raw": "- 🤗 Demo: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/webml-community/phi-3.5-webgpu",
"resource": {
"type": "space",
"id": "webml-community/phi-3.5-webgpu",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/webml-community/phi-3.5-webgpu",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- 🧑💻 Source code: ",
"raw": "- 🧑💻 Source code: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/huggingface/transformers.js-examples/tree/main/phi-3.5-webgpu",
"resource": null,
"url": null,
"href": "https://github.com/huggingface/transformers.js-examples/tree/main/phi-3.5-webgpu",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | I can't believe this... Phi-3.5-mini (3.8B) running in-browser at ~90 tokens/second on WebGPU w/ Transformers.js and ONNX Runtime Web! 🤯 Since everything runs 100% locally, no messages are sent to a server — a huge win for privacy!
- 🤗 Demo: https://huggingface.co/spaces/webml-community/phi-3.5-webgpu
- 🧑💻 Source code: https://github.com/huggingface/transformers.js-examples/tree/main/phi-3.5-webgpu | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b253b7ac5ecaae3d1efe0c/hwiQ0uvz3t-L5a-NtBIO6.png",
"fullname": "Joshua",
"name": "Xenova",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 3736,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/61b253b7ac5ecaae3d1efe0c/QUWyYmAATpcjIr9o41b8I.mp4"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"cschroeder",
"DmitryRyumin",
"John6666",
"4rtemi5",
"DenyTranDFW",
"Rybens",
"YaTharThShaRma999",
"massimoavvisati",
"Bruhn",
"nguyenbh",
"DiamanteAmarelo",
"TheDrunkenSnail",
"not-lain",
"cfahlgren1",
"loubnabnl",
"Ramikan-BR",
"Omarito2412",
"revolunet",
"osanseviero",
"jweston",
"cahlen",
"captainspock",
"louisbrulenaudet",
"rmanoj",
"ethix",
"RojoXlon",
"jundialwan",
"ghosty-0",
"chrislhow",
"gianpaj",
"parksthecoder"
],
"count": 31
},
{
"reaction": "🚀",
"users": [
"jdspugh",
"DiamanteAmarelo",
"cfahlgren1",
"Ramikan-BR",
"GordonM",
"cahlen"
],
"count": 6
},
{
"reaction": "😔",
"users": [
"ZeroWw",
"cahlen"
],
"count": 2
},
{
"reaction": "❤️",
"users": [
"Ramikan-BR",
"cahlen"
],
"count": 2
},
{
"reaction": "👀",
"users": [
"Ramikan-BR",
"cahlen"
],
"count": 2
},
{
"reaction": "🤯",
"users": [
"ayouba"
],
"count": 1
}
] | 2024-08-23T15:14:41.000Z | 2024-11-04T17:14:09.378Z | [
{
"avatarUrl": "/avatars/54483699273ac58a4a6fe1fa4aab65fe.svg",
"fullname": "Robert Sinclair",
"name": "ZeroWw",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 75,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63119cc5af10c9efa1e9b620/RA-UgDNTPsF6j5uDnG3-N.jpeg",
"fullname": "Akarshan Biswas",
"name": "qnixsynapse",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 11,
"isFollowing": false
},
{
"avatarUrl": "/avatars/4b954dcff7739057e105c6e9019d7ca2.svg",
"fullname": "Nicki Gataro",
"name": "ceoofcapybaras",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
},
{
"avatarUrl": "/avatars/866bd569f610f81a33ecf0d2077213bd.svg",
"fullname": "Ras",
"name": "Ke09876",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "/avatars/a996119e73a87724100e82babffe70ad.svg",
"fullname": "Jon Schlinkert",
"name": "jonschlinkert",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "/avatars/c8891564e64052c3af1c07d11e7d74bf.svg",
"fullname": "Nomi",
"name": "Agroni",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/d9d2d692aef76c4131be177b693f109f.svg",
"fullname": "Gianfranco Palumbo",
"name": "gianpaj",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/Xenova/503093709017134 | 12,807 | 11 |
983464411136936 | [
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/v2ray/deepgelbooru",
"resource": {
"type": "model",
"id": "v2ray/deepgelbooru",
"discussionNum": null
},
"url": "https://huggingface.co/v2ray/deepgelbooru",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "A Danbooru tag image tagger, maybe better than WD14 at some images.",
"raw": "A Danbooru tag image tagger, maybe better than WD14 at some images.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Training code, inference code, dataset included.",
"raw": "Training code, inference code, dataset included.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ":3",
"raw": ":3",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | https://huggingface.co/v2ray/deepgelbooru
A Danbooru tag image tagger, maybe better than WD14 on some images.
Training code, inference code, dataset included.
:3 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/fTCV7VLY0eK4OXbwgIT2n.png",
"fullname": "LagPixelLOL",
"name": "v2ray",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 115,
"isFollowing": false
} | [] | [] | [
{
"reaction": "😎",
"users": [
"John6666",
"Etherll",
"nyuuzyou"
],
"count": 3
},
{
"reaction": "🧠",
"users": [
"John6666",
"den0620"
],
"count": 2
}
] | 2024-08-23T15:11:04.000Z | 2024-08-24T15:53:47.520Z | [
{
"avatarUrl": "/avatars/0087f207c06a793c55ed0489ff793e70.svg",
"fullname": "nicolo",
"name": "nicolollo",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/fTCV7VLY0eK4OXbwgIT2n.png",
"fullname": "LagPixelLOL",
"name": "v2ray",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 115,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png",
"fullname": "John Smith",
"name": "John6666",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 384,
"isFollowing": false
}
] | /posts/v2ray/983464411136936 | 1,352 | 5 |
322138284948005 | [
{
"type": "text",
"value": "📄 ACL 2024: The Missing Papers",
"raw": "📄 ACL 2024: The Missing Papers",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Apparently, some papers from the ACL 2024 are still not listed in the ACL Anthology. While this issue will hopefully be fixed soon, we should give those papers additional spotlight.",
"raw": "Apparently, some papers from the ACL 2024 are still not listed in the ACL Anthology. While this issue will hopefully be fixed soon, we should give those papers additional spotlight.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Some of my favorites:",
"raw": "Some of my favorites:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "1. Dolma is an English corpus that encompasses 3 trillion tokens. Additionally, it is accompanied by an exceptional software package that consdierably advances the state-of-the-art in preparing data for LLM pretraining. (Source: I am currently using Dolma.)",
"raw": "1. Dolma is an English corpus that encompasses 3 trillion tokens. Additionally, it is accompanied by an exceptional software package that consdierably advances the state-of-the-art in preparing data for LLM pretraining. (Source: I am currently using Dolma.)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2402.00159",
"resource": {
"type": "paper",
"id": "2402.00159",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2402.00159",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": "Dolma: an Open Corpus of Three Trillion Tokens for Language Model\n Pretraining Research (2402.00159)"
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "2. In the paper \"Same Task, More Tokens: the Impact of Input Length on",
"raw": "2. In the paper \"Same Task, More Tokens: the Impact of Input Length on",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "the Reasoning Performance of Large Language Models\", the authors show how extending the context length impacts an LLM's reasoning performance. I asked myself a similar question a few months ago, and therefore this paper is highly interesting to me.",
"raw": "the Reasoning Performance of Large Language Models\", the authors show how extending the context length impacts an LLM's reasoning performance. I asked myself a similar question a few months ago, and therefore this paper is highly interesting to me.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2402.14848",
"resource": {
"type": "paper",
"id": "2402.14848",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2402.14848",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": "Same Task, More Tokens: the Impact of Input Length on the Reasoning\n Performance of Large Language Models (2402.14848)"
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "This was brought to my attention through a Linkedin post by ",
"raw": "This was brought to my attention through a Linkedin post by ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@ShayeghB",
"resource": null,
"url": null,
"href": null,
"user": "ShayeghB",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ", who is also affected:",
"raw": ", who is also affected:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2403.00143",
"resource": {
"type": "paper",
"id": "2403.00143",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2403.00143",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": "Ensemble-Based Unsupervised Discontinuous Constituency Parsing by Tree\n Averaging (2403.00143)"
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "View all the missing papers here: ",
"raw": "View all the missing papers here: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://theshayegh.github.io/ACL2024MissingPapers/",
"resource": null,
"url": null,
"href": "https://theshayegh.github.io/ACL2024MissingPapers/",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 📄 ACL 2024: The Missing Papers
Apparently, some papers from the ACL 2024 are still not listed in the ACL Anthology. While this issue will hopefully be fixed soon, we should give those papers additional spotlight.
Some of my favorites:
1. Dolma is an English corpus that encompasses 3 trillion tokens. Additionally, it is accompanied by an exceptional software package that considerably advances the state-of-the-art in preparing data for LLM pretraining. (Source: I am currently using Dolma.)
https://huggingface.co/papers/2402.00159
2. In the paper "Same Task, More Tokens: the Impact of Input Length on
the Reasoning Performance of Large Language Models", the authors show how extending the context length impacts an LLM's reasoning performance. I asked myself a similar question a few months ago, and therefore this paper is highly interesting to me.
https://huggingface.co/papers/2402.14848
This was brought to my attention through a Linkedin post by @ShayeghB, who is also affected:
https://huggingface.co/papers/2403.00143
View all the missing papers here:
https://theshayegh.github.io/ACL2024MissingPapers/ | {
"avatarUrl": "/avatars/f32291df2054c1bb4a01889d1b41c0d5.svg",
"fullname": "Christopher Schröder",
"name": "cschroeder",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 17,
"isFollowing": false
} | [] | [
{
"avatarUrl": "/avatars/4bde764fb817c9f11d2ab145806d0ff5.svg",
"fullname": "Behzad Shayegh",
"name": "ShayeghB",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null
}
] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-23T15:10:12.000Z | 2024-08-23T21:09:55.493Z | [] | /posts/cschroeder/322138284948005 | 343 | 0 |
214845461391146 | [
{
"type": "text",
"value": "how to intaialize zerogpu on hf space?",
"raw": "how to intaialize zerogpu on hf space?",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | How to initialize ZeroGPU on an HF Space?
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/BJLU1-6HOueSdNHrPoCQn.jpeg",
"fullname": "ahmadalfakeh",
"name": "ahmadalfakeh",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-23T14:07:19.000Z | 2024-08-23T16:12:24.857Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png",
"fullname": "John Smith",
"name": "John6666",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 384,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/BJLU1-6HOueSdNHrPoCQn.jpeg",
"fullname": "ahmadalfakeh",
"name": "ahmadalfakeh",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/ahmadalfakeh/214845461391146 | 300 | 2 |
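For context on the question in the post above: as far as I know, ZeroGPU Spaces are not initialized in code; you select ZeroGPU as the Space hardware in its settings and decorate the GPU-bound function with `@spaces.GPU`. A hedged sketch of the usual pattern (the model and pipeline choice here are illustrative assumptions):

```python
# Hedged sketch of the typical ZeroGPU pattern: hardware is selected in the
# Space settings, and the `spaces` package (preinstalled on ZeroGPU Spaces)
# attaches a GPU only while the decorated function runs.
import gradio as gr
import spaces
from diffusers import DiffusionPipeline

# Illustrative model choice; any CUDA-capable pipeline would work similarly.
pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo")
pipe.to("cuda")

@spaces.GPU  # request a ZeroGPU slot for the duration of this call
def generate(prompt: str):
    return pipe(prompt, num_inference_steps=2, guidance_scale=0.0).images[0]

gr.Interface(generate, gr.Textbox(label="Prompt"), gr.Image()).launch()
```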
250925661080568 | [
{
"type": "text",
"value": "Shoutout to everyone who participated in BigScience! Doesn't get enough credit but IMO paved the way for open-source LLMs!",
"raw": "Shoutout to everyone who participated in BigScience! Doesn't get enough credit but IMO paved the way for open-source LLMs!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2211.05100",
"resource": {
"type": "paper",
"id": "2211.05100",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2211.05100",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": "BLOOM: A 176B-Parameter Open-Access Multilingual Language Model (2211.05100)"
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/bigscience/bloom",
"resource": {
"type": "model",
"id": "bigscience/bloom",
"discussionNum": null
},
"url": "https://huggingface.co/bigscience/bloom",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/bigscience/bloomz",
"resource": {
"type": "model",
"id": "bigscience/bloomz",
"discussionNum": null
},
"url": "https://huggingface.co/bigscience/bloomz",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Shoutout to everyone who participated in BigScience! Doesn't get enough credit but IMO paved the way for open-source LLMs!
https://huggingface.co/papers/2211.05100
https://huggingface.co/bigscience/bloom
https://huggingface.co/bigscience/bloomz | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg",
"fullname": "Clem 🤗",
"name": "clem",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1734,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5e67bdd61009063689407479/2CoFGZcf3tXDt5oQXA5G5.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"MaziyarPanahi",
"MohammadAminDHM",
"osanseviero",
"YaTharThShaRma999",
"John6666",
"adnankhaan",
"louisbrulenaudet"
],
"count": 7
}
] | 2024-08-23T11:21:22.000Z | 2024-08-23T11:21:22.631Z | [] | /posts/clem/250925661080568 | 1,551 | 0 |
816510167464333 | [
{
"type": "text",
"value": " 🌟 Enchanted Tales Generator: A GPT-2 Inspired Story Weaver 🌟",
"raw": " 🌟 Enchanted Tales Generator: A GPT-2 Inspired Story Weaver 🌟",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Hello Hugging Face community! I'm excited to share my latest project, the Enchanted Tales Generator, inspired by Andrej Karpathy's enlightening YouTube videos on GPT-2.",
"raw": "Hello Hugging Face community! I'm excited to share my latest project, the Enchanted Tales Generator, inspired by Andrej Karpathy's enlightening YouTube videos on GPT-2.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🧙♂️ What is the Enchanted Tales Generator?",
"raw": "🧙♂️ What is the Enchanted Tales Generator?",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The Enchanted Tales Generator is a magical text generation app that weaves whimsical stories based on your prompts. It's powered by a fine-tuned GPT model and brings the wonder of AI-generated storytelling to your fingertips.",
"raw": "The Enchanted Tales Generator is a magical text generation app that weaves whimsical stories based on your prompts. It's powered by a fine-tuned GPT model and brings the wonder of AI-generated storytelling to your fingertips.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " 🎥 Inspiration",
"raw": " 🎥 Inspiration",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "This project was born from the inspiration I drew from Andrej Karpathy's incredible YouTube series on GPT-2. His clear explanations and deep insights into the workings of language models sparked my imagination and drove me to create something that could bring joy and creativity to others.",
"raw": "This project was born from the inspiration I drew from Andrej Karpathy's incredible YouTube series on GPT-2. His clear explanations and deep insights into the workings of language models sparked my imagination and drove me to create something that could bring joy and creativity to others.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " 🔮 Features",
"raw": " 🔮 Features",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Story Incantation: Input your magical prompt to start your tale",
"raw": "- Story Incantation: Input your magical prompt to start your tale",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Scroll Length: Adjust the length of your generated story",
"raw": "- Scroll Length: Adjust the length of your generated story",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Magical Intensity: Control the creativity (temperature) of the generation",
"raw": "- Magical Intensity: Control the creativity (temperature) of the generation",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Arcane Diversity: Fine-tune the variety of word choices (top-k sampling)",
"raw": "- Arcane Diversity: Fine-tune the variety of word choices (top-k sampling)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🚀 Try It Out: ",
"raw": "🚀 Try It Out: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/sagar007/GPT-2-with_gpu",
"resource": {
"type": "space",
"id": "sagar007/GPT-2-with_gpu",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/sagar007/GPT-2-with_gpu",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "You can experience the Enchanted Tales Generator right here on Hugging Face Spaces: [Insert your Spaces link here]",
"raw": "You can experience the Enchanted Tales Generator right here on Hugging Face Spaces: [Insert your Spaces link here]",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " 🛠️ Behind the Scenes",
"raw": " 🛠️ Behind the Scenes",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The app is built using:",
"raw": "The app is built using:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- PyTorch for the underlying GPT model",
"raw": "- PyTorch for the underlying GPT model",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Gradio for the user interface",
"raw": "- Gradio for the user interface",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Hugging Face Spaces for deployment",
"raw": "- Hugging Face Spaces for deployment",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I've implemented techniques learned from Karpathy's videos, such as:",
"raw": "I've implemented techniques learned from Karpathy's videos, such as:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Custom GPT architecture",
"raw": "- Custom GPT architecture",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Top-k sampling for diverse outputs",
"raw": "- Top-k sampling for diverse outputs",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Temperature-controlled generation",
"raw": "- Temperature-controlled generation",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🌈 Future Enchantments",
"raw": "🌈 Future Enchantments",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I'm continuously working on improving the Enchanted Tales Generator. Some ideas for future updates include:",
"raw": "I'm continuously working on improving the Enchanted Tales Generator. Some ideas for future updates include:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Fine-tuning on specific genres of stories",
"raw": "- Fine-tuning on specific genres of stories",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Adding options for character and setting generation",
"raw": "- Adding options for character and setting generation",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Implementing more advanced sampling techniques",
"raw": "- Implementing more advanced sampling techniques",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " 🤝 Join the Magic!",
"raw": " 🤝 Join the Magic!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🌟 Enchanted Tales Generator: A GPT-2 Inspired Story Weaver 🌟
Hello Hugging Face community! I'm excited to share my latest project, the Enchanted Tales Generator, inspired by Andrej Karpathy's enlightening YouTube videos on GPT-2.
🧙♂️ What is the Enchanted Tales Generator?
The Enchanted Tales Generator is a magical text generation app that weaves whimsical stories based on your prompts. It's powered by a fine-tuned GPT model and brings the wonder of AI-generated storytelling to your fingertips.
🎥 Inspiration
This project was born from the inspiration I drew from Andrej Karpathy's incredible YouTube series on GPT-2. His clear explanations and deep insights into the workings of language models sparked my imagination and drove me to create something that could bring joy and creativity to others.
🔮 Features
- Story Incantation: Input your magical prompt to start your tale
- Scroll Length: Adjust the length of your generated story
- Magical Intensity: Control the creativity (temperature) of the generation
- Arcane Diversity: Fine-tune the variety of word choices (top-k sampling)
🚀 Try It Out: https://huggingface.co/spaces/sagar007/GPT-2-with_gpu
You can experience the Enchanted Tales Generator right here on Hugging Face Spaces: [Insert your Spaces link here]
🛠️ Behind the Scenes
The app is built using:
- PyTorch for the underlying GPT model
- Gradio for the user interface
- Hugging Face Spaces for deployment
I've implemented techniques learned from Karpathy's videos, such as:
- Custom GPT architecture
- Top-k sampling for diverse outputs
- Temperature-controlled generation
🌈 Future Enchantments
I'm continuously working on improving the Enchanted Tales Generator. Some ideas for future updates include:
- Fine-tuning on specific genres of stories
- Adding options for character and setting generation
- Implementing more advanced sampling techniques
🤝 Join the Magic!
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a464cfe0de0c5c6d8b04a1/1gCs46R_bW9apQzLQUrn5.png",
"fullname": "Sagar pallai",
"name": "sagar007",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 8,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62a464cfe0de0c5c6d8b04a1/EOM3YlHaIhPWCeYxHaVjF.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62a464cfe0de0c5c6d8b04a1/J7yPvALFC8FURI3CV-PD0.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
},
{
"reaction": "👍",
"users": [
"Akmalkhan"
],
"count": 1
},
{
"reaction": "🚀",
"users": [
"prithivMLmods"
],
"count": 1
}
] | 2024-08-23T09:00:10.000Z | 2024-08-23T09:01:53.590Z | [] | /posts/sagar007/816510167464333 | 749 | 0 |
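The "Magical Intensity" (temperature) and "Arcane Diversity" (top-k) controls described in the post above correspond to a standard sampling step. A minimal generic sketch in PyTorch, not the app's actual code:

```python
# Generic sketch of temperature + top-k sampling (not the app's actual code).
import torch
import torch.nn.functional as F

def sample_next_token(logits: torch.Tensor, temperature: float = 0.8, top_k: int = 50) -> int:
    """Sample one token id from a [vocab_size] logits vector."""
    logits = logits / max(temperature, 1e-6)              # "Magical Intensity"
    top_values, top_indices = torch.topk(logits, top_k)   # "Arcane Diversity"
    probs = F.softmax(top_values, dim=-1)                 # renormalize over the top-k
    choice = torch.multinomial(probs, num_samples=1)      # draw one of the k candidates
    return int(top_indices[choice])

# Example with random logits standing in for a GPT-2 forward pass (vocab = 50257):
print(sample_next_token(torch.randn(50257)))
```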
949038364976284 | [
{
"type": "text",
"value": "Huge updates and improvements for FLUX LoRA training : ",
"raw": "Huge updates and improvements for FLUX LoRA training : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.patreon.com/posts/kohya-flux-lora-110293257",
"resource": null,
"url": null,
"href": "https://www.patreon.com/posts/kohya-flux-lora-110293257",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "10 GB, 16 GB, 24 GB and 48 GB GPU configs added - 10 GB config is like 3x to 5x slower sadly",
"raw": "10 GB, 16 GB, 24 GB and 48 GB GPU configs added - 10 GB config is like 3x to 5x slower sadly",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Massed Compute, RunPod and Windows Kohya SS GUI LoRA installers added to the zip file",
"raw": "Massed Compute, RunPod and Windows Kohya SS GUI LoRA installers added to the zip file",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Also right now testing new 16 GB FLUX LoRA training config and new way of regularization images. Moreover testing Apply T5 Attention Mask too. Lets see if Kohya FLUX LoRA workflow will become even better or not",
"raw": "Also right now testing new 16 GB FLUX LoRA training config and new way of regularization images. Moreover testing Apply T5 Attention Mask too. Lets see if Kohya FLUX LoRA workflow will become even better or not",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Also massive grids comparisons shared here : ",
"raw": "Also massive grids comparisons shared here : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.reddit.com/r/StableDiffusion/comments/1eyj4b8/kohya_ss_gui_very_easy_flux_lora_trainings_full/",
"resource": null,
"url": null,
"href": "https://www.reddit.com/r/StableDiffusion/comments/1eyj4b8/kohya_ss_gui_very_easy_flux_lora_trainings_full/",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Huge updates and improvements for FLUX LoRA training : https://www.patreon.com/posts/kohya-flux-lora-110293257
10 GB, 16 GB, 24 GB and 48 GB GPU configs added - 10 GB config is like 3x to 5x slower sadly
Massed Compute, RunPod and Windows Kohya SS GUI LoRA installers added to the zip file
Also right now testing a new 16 GB FLUX LoRA training config and a new way of using regularization images. Moreover, testing Apply T5 Attention Mask too. Let's see if the Kohya FLUX LoRA workflow will become even better or not
Also massive grids comparisons shared here : https://www.reddit.com/r/StableDiffusion/comments/1eyj4b8/kohya_ss_gui_very_easy_flux_lora_trainings_full/ | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png",
"fullname": "Furkan Gözükara",
"name": "MonsterMMORPG",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 368,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/5LVg9YfsdPxg5ht1dmCDR.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/dvyt0u95hM8Bqb8k4eniG.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/L1PG7UVvKG7lcvqLzlgiF.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/_VEfw41iVtSoh6oyqv7Km.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/nUH6OIBrXFFz2NRz1A5RD.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/PZVPG9kfFsC0Whb56qIzt.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/AvE-bK5rwPAeb8vulMIlI.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/lCOUN3DRHTPRkdCaqIIC8.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/HJpLOGRb47W-IdKmXhrTA.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/BLAFmcRgZRpQCsnwIXltY.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/jRmxa7zEO2pnMdLic5SbZ.jpeg"
}
] | [] | [
{
"reaction": "👍",
"users": [
"MonsterMMORPG",
"FalconNet",
"mk230580",
"ajibawa-2023"
],
"count": 4
},
{
"reaction": "👀",
"users": [
"MonsterMMORPG",
"John6666",
"busHF"
],
"count": 3
},
{
"reaction": "🤗",
"users": [
"MonsterMMORPG",
"So2ftt",
"mk230580"
],
"count": 3
},
{
"reaction": "😎",
"users": [
"MonsterMMORPG",
"FalconNet"
],
"count": 2
},
{
"reaction": "➕",
"users": [
"MonsterMMORPG",
"FalconNet"
],
"count": 2
},
{
"reaction": "🧠",
"users": [
"MonsterMMORPG",
"FalconNet"
],
"count": 2
},
{
"reaction": "🤝",
"users": [
"MonsterMMORPG",
"FalconNet"
],
"count": 2
},
{
"reaction": "🤯",
"users": [
"MonsterMMORPG",
"FalconNet"
],
"count": 2
},
{
"reaction": "🔥",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "🚀",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "❤️",
"users": [
"MonsterMMORPG"
],
"count": 1
}
] | 2024-08-22T23:58:11.000Z | 2024-08-22T23:59:02.304Z | [] | /posts/MonsterMMORPG/949038364976284 | 2,038 | 0 |
412753691113712 | [
{
"type": "text",
"value": "So, we published ",
"raw": "So, we published ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/frameai/ChatFrame",
"resource": {
"type": "model",
"id": "frameai/ChatFrame",
"discussionNum": null
},
"url": "https://huggingface.co/frameai/ChatFrame",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " model. this model is on higher level of responsibilities. You can solve mathematics problems and write high quality codes. We are inviting all of you to help use in fine-tune this model for all languages. Please give us a languages dataset to make our model available in all languages. We are waiting for your reply.",
"raw": " model. this model is on higher level of responsibilities. You can solve mathematics problems and write high quality codes. We are inviting all of you to help use in fine-tune this model for all languages. Please give us a languages dataset to make our model available in all languages. We are waiting for your reply.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | So, we published https://huggingface.co/frameai/ChatFrame model. This model takes on a higher level of responsibility: it can solve mathematics problems and write high-quality code. We are inviting all of you to help us fine-tune this model for all languages. Please share language datasets with us so we can make the model available in every language. We are waiting for your reply. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/653c2bc15e4f2c3e884b6743/qtq45xiTNk8GrPL0Irbar.jpeg",
"fullname": "AIEXPLORE",
"name": "explorewithai",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 7,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"John6666",
"explorewithai"
],
"count": 2
}
] | 2024-08-22T22:56:40.000Z | 2024-08-22T22:56:40.594Z | [] | /posts/explorewithai/412753691113712 | 1,351 | 0 |
175667424153668 | [
{
"type": "text",
"value": "The latest timm validation & test set results are now viewable by a leaderboard space: ",
"raw": "The latest timm validation & test set results are now viewable by a leaderboard space: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/timm/leaderboard",
"resource": {
"type": "space",
"id": "timm/leaderboard",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/timm/leaderboard",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "As of yesterday, I updated all of the results for ImageNet , ImageNet-ReaL, ImageNet-V2, ImageNet-R, ImageNet-A, and Sketch sets. The csv files can be found in the GH repo ",
"raw": "As of yesterday, I updated all of the results for ImageNet , ImageNet-ReaL, ImageNet-V2, ImageNet-R, ImageNet-A, and Sketch sets. The csv files can be found in the GH repo ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/huggingface/pytorch-image-models/tree/main/results",
"resource": null,
"url": null,
"href": "https://github.com/huggingface/pytorch-image-models/tree/main/results",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Unfortunately the latest benchmark csv files are not yet up to date, there are some gaps in dataset results vs throughput/flop numbers impact the plots.",
"raw": "Unfortunately the latest benchmark csv files are not yet up to date, there are some gaps in dataset results vs throughput/flop numbers impact the plots.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "h/t to ",
"raw": "h/t to ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@MohamedRashad",
"resource": null,
"url": null,
"href": null,
"user": "MohamedRashad",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " for making the first timm leaderboard.",
"raw": " for making the first timm leaderboard.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | The latest timm validation & test set results are now viewable by a leaderboard space: https://huggingface.co/spaces/timm/leaderboard
As of yesterday, I updated all of the results for ImageNet, ImageNet-ReaL, ImageNet-V2, ImageNet-R, ImageNet-A, and Sketch sets. The csv files can be found in the GH repo https://github.com/huggingface/pytorch-image-models/tree/main/results
Unfortunately, the latest benchmark csv files are not yet up to date, so there are some gaps between the dataset results and the throughput/FLOP numbers, which impacts the plots.
h/t to @MohamedRashad for making the first timm leaderboard.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1667002643224-604a5184dca2c7ac7508b849.jpeg",
"fullname": "Ross Wightman",
"name": "rwightman",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 214,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1628885133347-6116d0584ef9fdfbf45dc4d9.jpeg",
"fullname": "Mohamed Rashad",
"name": "MohamedRashad",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 140
}
] | [
{
"reaction": "🚀",
"users": [
"eliebak",
"John6666",
"osanseviero",
"sergiopaniego",
"bryant1410",
"nagaharish"
],
"count": 6
},
{
"reaction": "🔥",
"users": [
"eliebak",
"MohamedRashad",
"bryant1410",
"nagaharish"
],
"count": 4
}
] | 2024-08-22T22:09:30.000Z | 2024-08-23T02:02:30.638Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1628885133347-6116d0584ef9fdfbf45dc4d9.jpeg",
"fullname": "Mohamed Rashad",
"name": "MohamedRashad",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 140,
"isFollowing": false
}
] | /posts/rwightman/175667424153668 | 2,057 | 1 |
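The timm post above points at the results csv files in the GitHub repo; as a rough sketch of how to explore them programmatically, the snippet below pulls the ImageNet validation results into pandas. The `results-imagenet.csv` filename and the `model`/`top1` column names are assumptions based on the repo's long-standing layout, so adjust if the files have been renamed.

```python
import pandas as pd

# Assumed path and schema: the timm results folder has historically shipped a
# results-imagenet.csv with (at least) "model" and "top1" columns.
URL = ("https://raw.githubusercontent.com/huggingface/pytorch-image-models/"
       "main/results/results-imagenet.csv")

df = pd.read_csv(URL)
# Show the ten highest top-1 accuracy models on the ImageNet validation set.
print(df.sort_values("top1", ascending=False).head(10)[["model", "top1"]])
```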
759806807282910 | [
{
"type": "text",
"value": "𝗔𝗜𝟮𝟭 𝗶𝘁𝗲𝗿𝗮𝘁𝗲𝘀 𝘄𝗶𝘁𝗵 𝗻𝗲𝘄 𝗝𝗮𝗺𝗯𝗮 𝟭.𝟱 𝗿𝗲𝗹𝗲𝗮𝘀𝗲: 𝗡𝗲𝘄 𝘀𝘁𝗮𝗻𝗱𝗮𝗿𝗱 𝗳𝗼𝗿 𝗹𝗼𝗻𝗴-𝗰𝗼𝗻𝘁𝗲𝘅𝘁 𝘂𝘀𝗲-𝗰𝗮𝘀𝗲𝘀!🏅",
"raw": "𝗔𝗜𝟮𝟭 𝗶𝘁𝗲𝗿𝗮𝘁𝗲𝘀 𝘄𝗶𝘁𝗵 𝗻𝗲𝘄 𝗝𝗮𝗺𝗯𝗮 𝟭.𝟱 𝗿𝗲𝗹𝗲𝗮𝘀𝗲: 𝗡𝗲𝘄 𝘀𝘁𝗮𝗻𝗱𝗮𝗿𝗱 𝗳𝗼𝗿 𝗹𝗼𝗻𝗴-𝗰𝗼𝗻𝘁𝗲𝘅𝘁 𝘂𝘀𝗲-𝗰𝗮𝘀𝗲𝘀!🏅",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@ai21labs",
"resource": null,
"url": null,
"href": null,
"user": "ai21labs",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " used a different architecture to beat the status-quo Transformers models: Jamba architecture combines classic Transformers layers with the new Mamba layers, for which the complexity is a linear (instead of quadratic) function of the context length.",
"raw": " used a different architecture to beat the status-quo Transformers models: Jamba architecture combines classic Transformers layers with the new Mamba layers, for which the complexity is a linear (instead of quadratic) function of the context length.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "What does this imply?",
"raw": "What does this imply?",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "➡️ Jamba models are much more efficient for long contexts: faster (up to 2.5x faster for long context), takes less memory, and also performs better to recall everything in the prompt.",
"raw": "➡️ Jamba models are much more efficient for long contexts: faster (up to 2.5x faster for long context), takes less memory, and also performs better to recall everything in the prompt.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "That means it’s a new go-to model for RAG or agentic applications!",
"raw": "That means it’s a new go-to model for RAG or agentic applications!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "And the performance is not too shabby: Jamba 1.5 models are comparable in perf to similar-sized Llama-3.1 models! The largest model even outperforms Llama-3.1 405B on Arena-Hard.",
"raw": "And the performance is not too shabby: Jamba 1.5 models are comparable in perf to similar-sized Llama-3.1 models! The largest model even outperforms Llama-3.1 405B on Arena-Hard.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "✌️ Comes in 2 sizes: Mini (12B active/52B) and Large (94B active/399B)",
"raw": "✌️ Comes in 2 sizes: Mini (12B active/52B) and Large (94B active/399B)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "📏 Both deliver 256k context length, for low memory: Jamba-1.5 mini fits 140k context length on one single A100.",
"raw": "📏 Both deliver 256k context length, for low memory: Jamba-1.5 mini fits 140k context length on one single A100.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "⚙️ New quanttization method: Experts Int8 quantizes only the weights parts of the MoE layers, which account for 85% of weights",
"raw": "⚙️ New quanttization method: Experts Int8 quantizes only the weights parts of the MoE layers, which account for 85% of weights",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🤖 Natively supports JSON format generation & function calling.",
"raw": "🤖 Natively supports JSON format generation & function calling.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🔓 Permissive license *if your org makes <$50M revenue*",
"raw": "🔓 Permissive license *if your org makes <$50M revenue*",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Available on the Hub 👉 ",
"raw": "Available on the Hub 👉 ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/ai21labs/jamba-15-66c44befa474a917fcf55251",
"resource": {
"type": "collection",
"id": "ai21labs/jamba-15-66c44befa474a917fcf55251",
"discussionNum": null
},
"url": "https://huggingface.co/collections/ai21labs/jamba-15-66c44befa474a917fcf55251",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Read their release blog post 👉 ",
"raw": "Read their release blog post 👉 ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.ai21.com/blog/announcing-jamba-model-family",
"resource": null,
"url": null,
"href": "https://www.ai21.com/blog/announcing-jamba-model-family",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 𝗔𝗜𝟮𝟭 𝗶𝘁𝗲𝗿𝗮𝘁𝗲𝘀 𝘄𝗶𝘁𝗵 𝗻𝗲𝘄 𝗝𝗮𝗺𝗯𝗮 𝟭.𝟱 𝗿𝗲𝗹𝗲𝗮𝘀𝗲: 𝗡𝗲𝘄 𝘀𝘁𝗮𝗻𝗱𝗮𝗿𝗱 𝗳𝗼𝗿 𝗹𝗼𝗻𝗴-𝗰𝗼𝗻𝘁𝗲𝘅𝘁 𝘂𝘀𝗲-𝗰𝗮𝘀𝗲𝘀!🏅
@ai21labs used a different architecture to beat the status-quo Transformers models: Jamba architecture combines classic Transformers layers with the new Mamba layers, for which the complexity is a linear (instead of quadratic) function of the context length.
What does this imply?
➡️ Jamba models are much more efficient for long contexts: they are faster (up to 2.5x for long context), take less memory, and also perform better at recalling everything in the prompt.
That means it’s a new go-to model for RAG or agentic applications!
And the performance is not too shabby: Jamba 1.5 models are comparable in perf to similar-sized Llama-3.1 models! The largest model even outperforms Llama-3.1 405B on Arena-Hard.
✌️ Comes in 2 sizes: Mini (12B active/52B) and Large (94B active/399B)
📏 Both deliver 256k context length, for low memory: Jamba-1.5 mini fits 140k context length on one single A100.
⚙️ New quantization method: Experts Int8 quantizes only the weights of the MoE layers, which account for 85% of the model's weights
🤖 Natively supports JSON format generation & function calling.
🔓 Permissive license *if your org makes <$50M revenue*
Available on the Hub 👉 https://huggingface.co/collections/ai21labs/jamba-15-66c44befa474a917fcf55251
Read their release blog post 👉 https://www.ai21.com/blog/announcing-jamba-model-family
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg",
"fullname": "Aymeric Roucher",
"name": "m-ric",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 476,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/BIKp5t7VBDRBfPPxCIYUz.png"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"clem",
"M1cler",
"alielfilali01"
],
"count": 3
}
] | 2024-08-22T13:35:33.000Z | 2024-08-22T15:34:04.558Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/630920925a5c889aaedc7f33/w00N19M21l2FXe6ZasSYc.jpeg",
"fullname": "Kristaller486",
"name": "kristaller486",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg",
"fullname": "Aymeric Roucher",
"name": "m-ric",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 476,
"isFollowing": false
}
] | /posts/m-ric/759806807282910 | 918 | 2 |
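For readers who want to try the Jamba 1.5 checkpoints linked in the post above, here is a minimal, hedged transformers sketch. The repo id `ai21labs/AI21-Jamba-1.5-Mini` is my assumption of the Mini checkpoint's name (check the collection page for the exact id), the chat template is assumed to ship with the tokenizer, and a recent transformers release with the Jamba architecture (optionally plus the `mamba-ssm`/`causal-conv1d` kernels for fast Mamba layers) is required; the Mini model is still a large multi-GPU download.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed repo id from the Jamba 1.5 collection; verify against the collection page.
MODEL_ID = "ai21labs/AI21-Jamba-1.5-Mini"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype="auto",   # let transformers pick the checkpoint's dtype
    device_map="auto",    # shard across available GPUs
)

# Chat-style prompt; the instruct tokenizer is assumed to provide a chat template.
messages = [{"role": "user", "content": "Summarise the trade-off between Mamba and attention layers."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

out = model.generate(inputs, max_new_tokens=128)
print(tokenizer.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```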
182147151896536 | [
{
"type": "text",
"value": "# Offensive Security Reconnaissance Continued with Public Facing Industrial Control System HMIs using Moondream",
"raw": "# Offensive Security Reconnaissance Continued with Public Facing Industrial Control System HMIs using Moondream",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Building on my previous experiments with Moondream for physical security reconnaissance planning automation (",
"raw": "Building on my previous experiments with Moondream for physical security reconnaissance planning automation (",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/posts/Csplk/926337297827024",
"resource": null,
"url": null,
"href": "https://huggingface.co/posts/Csplk/926337297827024",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "), I've now turned my attention to exploring the potential of this powerful image-text-text model for offensive security reconnaissance in the realm of Industrial Control Systems (ICS).",
"raw": "), I've now turned my attention to exploring the potential of this powerful image-text-text model for offensive security reconnaissance in the realm of Industrial Control Systems (ICS).",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "ICS HMIs (Human-Machine Interfaces) are increasingly exposed to the public internet, often without adequate security measures in place. This presents a tantalizing opportunity for malicious actors to exploit vulnerabilities and gain unauthorized access to critical infrastructure.",
"raw": "ICS HMIs (Human-Machine Interfaces) are increasingly exposed to the public internet, often without adequate security measures in place. This presents a tantalizing opportunity for malicious actors to exploit vulnerabilities and gain unauthorized access to critical infrastructure.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Using Moondream with batch processing (",
"raw": "Using Moondream with batch processing (",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Csplk/moondream2-batch-processing",
"resource": {
"type": "space",
"id": "Csplk/moondream2-batch-processing",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Csplk/moondream2-batch-processing",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "), I've been experimenting with analyzing public facing ICS (",
"raw": "), I've been experimenting with analyzing public facing ICS (",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/Csplk/ICS_UIs",
"resource": {
"type": "dataset",
"id": "Csplk/ICS_UIs",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/Csplk/ICS_UIs",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ") HMI (",
"raw": ") HMI (",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/Csplk/HMI",
"resource": {
"type": "dataset",
"id": "Csplk/HMI",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/Csplk/HMI",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ") screenshots from shodan to identify types of exposed ICS system HMIs, how they are operated and how malicious actors with access to these systems could cause damage to physical infrastructure. Feeding images of HMIs and pre-defined text prompts to Moondream batch processing successfully (unconfirmed accuracy levels) extracted information about the underlying systems, including",
"raw": ") screenshots from shodan to identify types of exposed ICS system HMIs, how they are operated and how malicious actors with access to these systems could cause damage to physical infrastructure. Feeding images of HMIs and pre-defined text prompts to Moondream batch processing successfully (unconfirmed accuracy levels) extracted information about the underlying systems, including",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "1. **System type**",
"raw": "1. **System type**",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "2. **Possible Operation Details**",
"raw": "2. **Possible Operation Details**",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "3. **Malicious Actor Outcomes**",
"raw": "3. **Malicious Actor Outcomes**",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Next steps:",
"raw": "Next steps:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "* I have a longer and more in depth blog write up in the works that will cover the previous and this post's approaches for experiments for sharing via HF community blog posts soon.",
"raw": "* I have a longer and more in depth blog write up in the works that will cover the previous and this post's approaches for experiments for sharing via HF community blog posts soon.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "* I plan to continue refining my Moondream-based tool to improve its accuracy and effectiveness in processing public facing ICS HMIs.",
"raw": "* I plan to continue refining my Moondream-based tool to improve its accuracy and effectiveness in processing public facing ICS HMIs.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "* As mentioned before, offensive security with moondream focused HF Space once its fleshed out. ",
"raw": "* As mentioned before, offensive security with moondream focused HF Space once its fleshed out. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Thanks again to ",
"raw": "Thanks again to ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@vikhyatk",
"resource": null,
"url": null,
"href": null,
"user": "vikhyatk",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " for the incredible Moondream model. ",
"raw": " for the incredible Moondream model. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/vikhyatk/moondream2",
"resource": {
"type": "model",
"id": "vikhyatk/moondream2",
"discussionNum": null
},
"url": "https://huggingface.co/vikhyatk/moondream2",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | # Offensive Security Reconnaissance Continued with Public Facing Industrial Control System HMIs using Moondream
Building on my previous experiments with Moondream for physical security reconnaissance planning automation (https://huggingface.co/posts/Csplk/926337297827024), I've now turned my attention to exploring the potential of this powerful image-text-to-text model for offensive security reconnaissance in the realm of Industrial Control Systems (ICS).
ICS HMIs (Human-Machine Interfaces) are increasingly exposed to the public internet, often without adequate security measures in place. This presents a tantalizing opportunity for malicious actors to exploit vulnerabilities and gain unauthorized access to critical infrastructure.
Using Moondream with batch processing (https://huggingface.co/spaces/Csplk/moondream2-batch-processing), I've been experimenting with analyzing public facing ICS (https://huggingface.co/datasets/Csplk/ICS_UIs) HMI (https://huggingface.co/datasets/Csplk/HMI) screenshots from Shodan to identify the types of exposed ICS HMIs, how they are operated, and how malicious actors with access to these systems could cause damage to physical infrastructure. Feeding images of HMIs and pre-defined text prompts to Moondream batch processing successfully (accuracy unconfirmed) extracted information about the underlying systems, including
1. **System type**
2. **Possible Operation Details**
3. **Malicious Actor Outcomes**
Next steps:
* I have a longer and more in-depth blog write-up in the works that covers the experimental approaches from the previous post and this one, to be shared via an HF community blog post soon.
* I plan to continue refining my Moondream-based tool to improve its accuracy and effectiveness in processing public facing ICS HMIs.
* As mentioned before, an offensive-security-focused Moondream HF Space will follow once it's fleshed out.
Thanks again to @vikhyatk for the incredible Moondream model. https://huggingface.co/vikhyatk/moondream2 | {
"avatarUrl": "/avatars/b2725bb163fa15d6c5856121780d52eb.svg",
"fullname": "Ci Splunk",
"name": "Csplk",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 43,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/GGWt0HG4Qu9TyC6fuizON.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/_9mddraBhISrYlFsaYEnU.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/cEzPCp9s8FI8JSySMauhn.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/_jMbHjLqMZO3_tsVOBku1.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/NZ9HasCq8cylFR11yRje1.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/t0iFd8WuYzQhumIgnuHE-.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/ZBGIdBvZpXPRSRFxRsEvF.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/eaiGftpSK9YREZE8WhgQB.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/_hCSD33cFQOlpisaUlJjR.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/FQr8-iWb-C5yMHnCVbiNi.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/tX_BQMwTyO597HXbE2CM-.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/P_qYRLhlgka7294_z6NO3.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63117568fa95534e218da163/8h9zN8aKRxPLBnXW7sqY9.jpeg",
"fullname": "Vik Korrapati",
"name": "vikhyatk",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 365
}
] | [
{
"reaction": "➕",
"users": [
"merterbak",
"jaden2",
"efecelik",
"odyss3y"
],
"count": 4
},
{
"reaction": "👀",
"users": [
"John6666",
"odyss3y",
"rreed-pha",
"adarshxs"
],
"count": 4
},
{
"reaction": "🔥",
"users": [
"vikhyatk",
"ajibawa-2023"
],
"count": 2
}
] | 2024-08-22T11:14:12.000Z | 2024-08-22T22:11:13.998Z | [] | /posts/Csplk/182147151896536 | 2,245 | 0 |
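The batch-processing flow described in the Moondream reconnaissance post above (pairs of HMI screenshots plus pre-defined questions) can be approximated with moondream2's documented `encode_image`/`answer_question` helpers. This is a hedged sketch: the file paths and the two recon questions are placeholders, and the custom-code API is assumed to match the model card at the time of the post.

```python
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "vikhyatk/moondream2"
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

# Placeholder screenshots and prompts; in the real workflow these come from the
# HMI screenshot datasets linked in the post.
screenshots = ["hmi_01.png", "hmi_02.png"]
questions = [
    "What type of industrial control system is shown?",
    "Which controls could an operator use to change the process state?",
]

for path in screenshots:
    image = Image.open(path)
    enc = model.encode_image(image)          # encode each image once, reuse for every question
    for q in questions:
        answer = model.answer_question(enc, q, tokenizer)
        print(f"{path} | {q} -> {answer}")
```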
813242102494420 | [
{
"type": "text",
"value": "🎉 Just dropped a fresh version of dataset-viber along with some cool, Gradio-based annotators! These tools aren't about formalities—they're here to help you quickly collect feedback and get your projects moving along to a more serious stage, ahumm ",
"raw": "🎉 Just dropped a fresh version of dataset-viber along with some cool, Gradio-based annotators! These tools aren't about formalities—they're here to help you quickly collect feedback and get your projects moving along to a more serious stage, ahumm ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@argilla",
"resource": null,
"url": null,
"href": null,
"user": "argilla",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Some new features!",
"raw": "Some new features!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- manual import from a CSV or the Hugging Face Hub",
"raw": "- manual import from a CSV or the Hugging Face Hub",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- manual export to CSV or the Hub",
"raw": "- manual export to CSV or the Hub",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- improved automated export to the Hub and CSV",
"raw": "- improved automated export to the Hub and CSV",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- limit interaction with specific components",
"raw": "- limit interaction with specific components",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- stream data with custom next_input features (SO to Ben Burtenshaw for the suggestions)",
"raw": "- stream data with custom next_input features (SO to Ben Burtenshaw for the suggestions)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- model in-the-loop support for all tasks",
"raw": "- model in-the-loop support for all tasks",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/dataset-viber/gradio-annotators-66c5ce73d5e3bf99caa445b1",
"resource": {
"type": "collection",
"id": "dataset-viber/gradio-annotators-66c5ce73d5e3bf99caa445b1",
"discussionNum": null
},
"url": "https://huggingface.co/collections/dataset-viber/gradio-annotators-66c5ce73d5e3bf99caa445b1",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🎉 Just dropped a fresh version of dataset-viber along with some cool, Gradio-based annotators! These tools aren't about formalities—they're here to help you quickly collect feedback and get your projects moving along to a more serious stage, ahumm @argilla.
Some new features!
- manual import from a CSV or the Hugging Face Hub
- manual export to CSV or the Hub
- improved automated export to the Hub and CSV
- limit interaction with specific components
- stream data with custom next_input features (shout-out to Ben Burtenshaw for the suggestions)
- model in-the-loop support for all tasks
https://huggingface.co/collections/dataset-viber/gradio-annotators-66c5ce73d5e3bf99caa445b1 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg",
"fullname": "David Berenstein",
"name": "davidberenstein1957",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 148,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"osanseviero"
],
"count": 2
}
] | 2024-08-22T10:20:54.000Z | 2024-08-23T15:04:27.918Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg",
"fullname": "David Berenstein",
"name": "davidberenstein1957",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 148,
"isFollowing": false
}
] | /posts/davidberenstein1957/813242102494420 | 1,300 | 3 |
519348337153938 | [
{
"type": "text",
"value": "BIG update dropped for ",
"raw": "BIG update dropped for ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/bigdata-pw/Flickr",
"resource": {
"type": "dataset",
"id": "bigdata-pw/Flickr",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/bigdata-pw/Flickr",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " - now ~515M images! Target for the next update: 1B",
"raw": " - now ~515M images! Target for the next update: 1B",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "In case you missed them; other recent drops include ",
"raw": "In case you missed them; other recent drops include ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/bigdata-pw/Dinosaurs",
"resource": {
"type": "dataset",
"id": "bigdata-pw/Dinosaurs",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/bigdata-pw/Dinosaurs",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " - a small set of BIG creatures 🦕🦖 and the first in a series of articles about the art of web scraping! ",
"raw": " - a small set of BIG creatures 🦕🦖 and the first in a series of articles about the art of web scraping! ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/hlky/web-scraping-101",
"resource": null,
"url": null,
"href": "https://huggingface.co/blog/hlky/web-scraping-101",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/hlky/web-scraping-102",
"resource": null,
"url": null,
"href": "https://huggingface.co/blog/hlky/web-scraping-102",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Stay tuned for exciting datasets and models coming soon:",
"raw": "Stay tuned for exciting datasets and models coming soon:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- PC and Console game screenshots",
"raw": "- PC and Console game screenshots",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- TV/Film actors biographies and photos (think facial recognition and automatic captioning!)",
"raw": "- TV/Film actors biographies and photos (think facial recognition and automatic captioning!)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- ",
"raw": "- ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/bigdata-pw/lyrics-gpt",
"resource": {
"type": "model",
"id": "bigdata-pw/lyrics-gpt",
"discussionNum": null
},
"url": "https://huggingface.co/bigdata-pw/lyrics-gpt",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " v2",
"raw": " v2",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- and more!",
"raw": "- and more!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | BIG update dropped for https://huggingface.co/datasets/bigdata-pw/Flickr - now ~515M images! Target for the next update: 1B
In case you missed them; other recent drops include https://huggingface.co/datasets/bigdata-pw/Dinosaurs - a small set of BIG creatures 🦕🦖 and the first in a series of articles about the art of web scraping! https://huggingface.co/blog/hlky/web-scraping-101 https://huggingface.co/blog/hlky/web-scraping-102
Stay tuned for exciting datasets and models coming soon:
- PC and Console game screenshots
- TV/Film actors biographies and photos (think facial recognition and automatic captioning!)
- https://huggingface.co/bigdata-pw/lyrics-gpt v2
- and more! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/665074ded3e886a93d713e73/tTAkhsz2J-uEQAfSsRvAf.jpeg",
"fullname": "hlky",
"name": "hlky",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 17,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"victor",
"John6666",
"Gatozu35",
"Nelathan",
"ZeroWw"
],
"count": 5
}
] | 2024-08-21T15:06:24.000Z | 2024-09-04T19:26:33.477Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/665074ded3e886a93d713e73/tTAkhsz2J-uEQAfSsRvAf.jpeg",
"fullname": "hlky",
"name": "hlky",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 17,
"isFollowing": false
},
{
"avatarUrl": "/avatars/6e162f37ccc544f256c7cf5d1171d406.svg",
"fullname": "Matt H",
"name": "MattHVisual",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
}
] | /posts/hlky/519348337153938 | 2,152 | 11 |
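At ~515M rows, streaming is the practical way to peek at the Flickr dataset announced above. The sketch below uses the standard `datasets` streaming API; it assumes the default "train" split and that the hosted files are in a format `datasets` can stream, and it prints whatever column names the dataset actually exposes rather than assuming a schema.

```python
from datasets import load_dataset

# Stream so the ~515M-row dataset is never fully downloaded.
ds = load_dataset("bigdata-pw/Flickr", split="train", streaming=True)

for i, row in enumerate(ds):
    print(sorted(row.keys()))  # inspect the schema from the first few records
    if i >= 2:
        break
```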
209586816604878 | [
{
"type": "text",
"value": "🚨 NEW TASK ALERT 🚨",
"raw": "🚨 NEW TASK ALERT 🚨",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Extractive Question Answering: because sometimes generative is not all you need 😉",
"raw": "Extractive Question Answering: because sometimes generative is not all you need 😉",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "AutoTrain is the only open-source, no code solution to offer so many tasks across different modalities. Current task count: 23 🚀",
"raw": "AutoTrain is the only open-source, no code solution to offer so many tasks across different modalities. Current task count: 23 🚀",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Check out the blog post on getting started with this task: ",
"raw": "Check out the blog post on getting started with this task: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/abhishek/extractive-qa-autotrain",
"resource": null,
"url": null,
"href": "https://huggingface.co/blog/abhishek/extractive-qa-autotrain",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🚨 NEW TASK ALERT 🚨
Extractive Question Answering: because sometimes generative is not all you need 😉
AutoTrain is the only open-source, no-code solution to offer so many tasks across different modalities. Current task count: 23 🚀
Check out the blog post on getting started with this task: https://huggingface.co/blog/abhishek/extractive-qa-autotrain | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fa19f4ba13e063b8b2b5e11/nGVHdTYX2udnt-K8mqY27.jpeg",
"fullname": "Abhishek Thakur",
"name": "abhishek",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 1379,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-21T14:08:31.000Z | 2024-08-21T14:08:31.891Z | [] | /posts/abhishek/209586816604878 | 1,842 | 0 |
964839563451127 | [
{
"type": "text",
"value": "🙋 Calling all Hugging Face users! We want to hear from YOU!",
"raw": "🙋 Calling all Hugging Face users! We want to hear from YOU!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "What feature or improvement would make the biggest impact on Hugging Face?",
"raw": "What feature or improvement would make the biggest impact on Hugging Face?",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Whether it's the Hub, better documentation, new integrations, or something completely different – we're all ears!",
"raw": "Whether it's the Hub, better documentation, new integrations, or something completely different – we're all ears!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Your feedback shapes the future of Hugging Face. Drop your ideas in the comments below! 👇",
"raw": "Your feedback shapes the future of Hugging Face. Drop your ideas in the comments below! 👇",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🙋 Calling all Hugging Face users! We want to hear from YOU!
What feature or improvement would make the biggest impact on Hugging Face?
Whether it's the Hub, better documentation, new integrations, or something completely different – we're all ears!
Your feedback shapes the future of Hugging Face. Drop your ideas in the comments below! 👇 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg",
"fullname": "Victor Mustar",
"name": "victor",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 2578,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"lazarustda",
"clem",
"jsulz",
"Saugatkafley",
"lukmanaj",
"sergiopaniego",
"not-lain",
"nicoboss",
"cfahlgren1",
"Tonic",
"nbroad",
"denizaybey",
"Nymbo",
"John6666",
"DamarJati",
"ajibawa-2023",
"MarinaraSpaghetti",
"KingNish",
"asigalov61",
"iafun",
"cschroeder",
"RGAES",
"louisbrulenaudet",
"enzostvs",
"blanchon",
"khitab",
"kramp",
"Xdotnet",
"oritey"
],
"count": 29
},
{
"reaction": "👀",
"users": [
"clem",
"jsulz",
"not-lain",
"nicoboss",
"cfahlgren1",
"pierrci",
"nbroad",
"Nymbo",
"John6666",
"MarinaraSpaghetti",
"merterbak",
"nkasmanoff",
"TanVPat",
"blanchon",
"osanseviero"
],
"count": 15
},
{
"reaction": "❤️",
"users": [
"ijohn07",
"MarinaraSpaghetti",
"sauravssss",
"Rybens",
"nyuuzyou",
"asigalov61",
"iafun",
"MoritzLaurer",
"blanchon",
"John6666"
],
"count": 10
},
{
"reaction": "🤗",
"users": [
"prithivMLmods",
"asigalov61",
"iafun",
"John6666",
"onekq"
],
"count": 5
},
{
"reaction": "🤝",
"users": [
"ha1772007",
"asigalov61",
"John6666"
],
"count": 3
},
{
"reaction": "👍",
"users": [
"John6666",
"onekq"
],
"count": 2
},
{
"reaction": "🚀",
"users": [
"John6666"
],
"count": 1
},
{
"reaction": "➕",
"users": [
"John6666"
],
"count": 1
},
{
"reaction": "😎",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-21T13:35:56.000Z | 2024-11-14T16:55:03.293Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669235837892-noauth.jpeg",
"fullname": "Atlus",
"name": "Atlusmax",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61929226ded356549e20c5da/ONUjP2S5fUWd07BiFXm0i.jpeg",
"fullname": "Sergio Paniego",
"name": "sergiopaniego",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 10,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6527e89a8808d80ccff88b7a/BRKGVgk_dJO34ZOi3Slb_.jpeg",
"fullname": "Lain",
"name": "not-lain",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 919,
"isFollowing": false
},
{
"avatarUrl": "/avatars/20d40755196d07a3e9ce2ac65d322d10.svg",
"fullname": "Nico Bosshard",
"name": "nicoboss",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 10,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg",
"fullname": "Victor Mustar",
"name": "victor",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 2578,
"isFollowing": false
},
{
"avatarUrl": "/avatars/c94a447e4ef4e02d24e7e15eaa5a7908.svg",
"fullname": "Mykyta Khorosh",
"name": "MkarOnFx",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64371b564aacf7bf786fb530/0lZEdVu06bx11fy1uTjpt.jpeg",
"fullname": "Nymbo",
"name": "Nymbo",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 229,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png",
"fullname": "John Smith",
"name": "John6666",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 384,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65af0a0da560bc9329dc2daf/pLNxJNw4xkgDNahgmqFwH.jpeg",
"fullname": "ijohn free life",
"name": "ijohn07",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 37,
"isFollowing": false
},
{
"avatarUrl": "/avatars/706d7fd65c992ab5a05da40a7c4ebdfd.svg",
"fullname": "Himanshu",
"name": "ha1772007",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg",
"fullname": "leroy Samuel Dyer",
"name": "LeroyDyer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 82,
"isFollowing": false
},
{
"avatarUrl": "/avatars/267aa5ea8737550f7e5cfb53718f09a8.svg",
"fullname": "iafun",
"name": "iafun",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/656d73907825b310103b992d/oWmOZDt1uP5UN1WoIlGVB.jpeg",
"fullname": "sea be seen",
"name": "seaseen",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/651d4e73acd8e9168ac92b04/WMYCWKx9MM8Xxj8vXursD.png",
"fullname": "Jonah Ramponi",
"name": "jonah-ramponi",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/Qz1WRFYfs6ZSnRz88wEZt.jpeg",
"fullname": "Deniz Aybey",
"name": "denizaybey",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 3,
"isFollowing": false
},
{
"avatarUrl": "/avatars/52a153d04d325469e1be69bce610ebe5.svg",
"fullname": "ecyht2",
"name": "ecyht2",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6612aedf09f16e7347dfa7e1/bPYjBXCedY_1fSIPjoBTY.jpeg",
"fullname": "Nishith Jain",
"name": "KingNish",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1072,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg",
"fullname": "Prithiv Sakthi",
"name": "prithivMLmods",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 342,
"isFollowing": false
},
{
"avatarUrl": "/avatars/344c6446d92cb2c409ee39627e7cab49.svg",
"fullname": "Aronson",
"name": "Jason789451",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f57ea2d3f32f12a3c0692e6/b-9GG2p--smCameUPeCBN.jpeg",
"fullname": "Alex",
"name": "asigalov61",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 62,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/DQHUtTISeJ0pYt5DEu_lD.png",
"fullname": "Hidden Forces",
"name": "Hidden-Forces",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6435718aaaef013d1aec3b8b/XKf-8MA47tjVAM6SCX0MP.jpeg",
"fullname": "Bartowski",
"name": "bartowski",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 2735,
"isFollowing": false
},
{
"avatarUrl": "/avatars/b712583bd0e481b216401dee54a8b514.svg",
"fullname": "asgs",
"name": "john71",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64cba00d710645aa7b04f281/a_-LPwd4wqRyi8sJ1QxjI.jpeg",
"fullname": "Husnain",
"name": "Niansuh",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 64,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/653bccf9c5ba23359b23a76b/Na3jFV7tNdB_DXh-iXk_D.png",
"fullname": "cgus",
"name": "cgus",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 5,
"isFollowing": false
},
{
"avatarUrl": "/avatars/f32291df2054c1bb4a01889d1b41c0d5.svg",
"fullname": "Christopher Schröder",
"name": "cschroeder",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 17,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6353e36a5eac2d2efa75699e/r2jeIt9rK2ggVfUwPqoa6.png",
"fullname": "Stefan-Gabriel Muscalu",
"name": "legraphista",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 75,
"isFollowing": false
},
{
"avatarUrl": "/avatars/81c246e912efda163baac7f3a248163e.svg",
"fullname": "Bradock",
"name": "0xBradock",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/646c83c871d0c8a6e4455854/b5Act86SZ7zuUXmLOIqYj.png",
"fullname": "digiplay",
"name": "digiplay",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 165,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63239b8370edc53f51cd5d42/88od0k-AAkxAIV-5ULwDs.png",
"fullname": "Yn Tec",
"name": "Yntec",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1994,
"isFollowing": false
},
{
"avatarUrl": "/avatars/ef7b193937e6b345dbde7fd41f50297a.svg",
"fullname": "santiaga barandiaran",
"name": "santxe",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/652bb5c9f60799e9a45ae17b/j6IaagL3fXJHPnnTeUyrV.png",
"fullname": "Fashion Stash",
"name": "FashionStash",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1649143001781-624bebf604abc7ebb01789af.jpeg",
"fullname": "Apolinário from multimodal AI art",
"name": "multimodalart",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 3149,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647d6fe3adaf5cc26dab3917/AyI6Z_2X5HoLko4vpQhk0.png",
"fullname": "Peng",
"name": "pandases",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b253b7ac5ecaae3d1efe0c/hwiQ0uvz3t-L5a-NtBIO6.png",
"fullname": "Joshua",
"name": "Xenova",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 3736,
"isFollowing": false
},
{
"avatarUrl": "/avatars/c51c46e23f8cb2aecad8077d43e4842a.svg",
"fullname": "bergh",
"name": "hanna19",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6459fa0f5b3111fbe83286e1/UhCa7JNbtTjC6dgOjZtH0.jpeg",
"fullname": "Louis Brulé Naudet",
"name": "louisbrulenaudet",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 176,
"isFollowing": false
},
{
"avatarUrl": "/avatars/0c1dd3ebc0e2c8ecf6c771d3728accf9.svg",
"fullname": "Razvan",
"name": "razvanab",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1614079701740-6033e34a9aa44495c80dd043.jpeg",
"fullname": "Birger Moell",
"name": "birgermoell",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 31,
"isFollowing": false
},
{
"avatarUrl": "/avatars/e2c242385e629315a76bc250aa633b65.svg",
"fullname": "you",
"name": "megachad",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/715ab84f0231708fbe1a4cde0ee1ca8a.svg",
"fullname": "Eren Irmak",
"name": "martineden",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3,
"isFollowing": false
},
{
"avatarUrl": "/avatars/2e1252e81d8687173b34daba8058da33.svg",
"fullname": "maxwell andrews",
"name": "madmaxbr5",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "/avatars/7e26cfd48dccef52587739988a9114cf.svg",
"fullname": "Roberto de Jesús Alfaro López",
"name": "Alfarrow",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/aa8e2e38e07d1fa0d2dc611723bc8f4c.svg",
"fullname": "Łael Al-Halawani",
"name": "ljhwild",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "/avatars/be539f0d94e0cafb289d7f095484978a.svg",
"fullname": "Adrianna Wojtunik",
"name": "awojtunik",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/0006334ddac07b437d4d7f267b571fb9.svg",
"fullname": "QROST",
"name": "czd358121692",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6593502ca2607099284523db/13IfQE8qnJsjPXbOeGrLa.jpeg",
"fullname": "william marshall",
"name": "fuzzy-mittenz",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 16,
"isFollowing": false
}
] | /posts/victor/964839563451127 | 5,393 | 139 |
257466309291875 | [
{
"type": "text",
"value": "This isn’t a goal of ours because we have plenty of money in the bank but quite excited to see that ",
"raw": "This isn’t a goal of ours because we have plenty of money in the bank but quite excited to see that ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@huggingfaceis",
"resource": null,
"url": null,
"href": null,
"user": "huggingfaceis",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " profitable these days, with 220 team members and most of our platform being free (like model hosting) and open-source for the community! ",
"raw": " profitable these days, with 220 team members and most of our platform being free (like model hosting) and open-source for the community! ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Especially noteworthy at a time when most AI startups wouldn’t survive a year or two without VC money. Yay!",
"raw": "Especially noteworthy at a time when most AI startups wouldn’t survive a year or two without VC money. Yay!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | This isn’t a goal of ours because we have plenty of money in the bank, but we’re quite excited to see that @huggingfaceis profitable these days, with 220 team members and most of our platform being free (like model hosting) and open-source for the community!
Especially noteworthy at a time when most AI startups wouldn’t survive a year or two without VC money. Yay! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg",
"fullname": "Clem 🤗",
"name": "clem",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1734,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"Priyankvadaliya",
"Claudiu2004",
"siaal313",
"hvaara",
"jsulz",
"appvoid",
"lhoestq",
"desklockgun",
"Nymbo",
"NouRed",
"IAmTheCollector",
"ajibawa-2023",
"wanghaofan",
"bartowski",
"sumandas",
"Arakinas",
"carsenk",
"jphme",
"Shehab007",
"handfuloftitty",
"philipp-zettl"
],
"count": 21
},
{
"reaction": "🤗",
"users": [
"burtenshaw",
"victor",
"hvaara",
"jsulz",
"John6666",
"Nymbo",
"bartowski",
"carsenk",
"rAIfle",
"handfuloftitty"
],
"count": 10
}
] | 2024-08-21T13:01:59.000Z | 2024-08-22T12:45:00.128Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a813dedbb9e28866a91b27/zs-RWFuXs17IfPUhxQaei.jpeg",
"fullname": "appvoid",
"name": "appvoid",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 35,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6678bb746f2ac48ee684695a/4eNpaO33Pd51oKPt9Lkd_.jpeg",
"fullname": "Felipe Marcos de Abreu Aquino",
"name": "Felipe2231",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg",
"fullname": "Feynman Innovations",
"name": "ajibawa-2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 137,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6435718aaaef013d1aec3b8b/XKf-8MA47tjVAM6SCX0MP.jpeg",
"fullname": "Bartowski",
"name": "bartowski",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 2735,
"isFollowing": false
}
] | /posts/clem/257466309291875 | 3,625 | 4 |
134308950278909 | [
{
"type": "mention",
"value": null,
"raw": "@nb2375",
"resource": null,
"url": null,
"href": null,
"user": "nb2375",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " welcome to HF!",
"raw": " welcome to HF!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | @nb2375 welcome to HF! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg",
"fullname": "Clem 🤗",
"name": "clem",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1734,
"isFollowing": false
} | [] | [
{
"avatarUrl": "/avatars/f139f4248109ac02e57293971638a57e.svg",
"fullname": "nathan benaich",
"name": "nb2375",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1
}
] | [
{
"reaction": "👍",
"users": [
"John6666",
"victor",
"merterbak"
],
"count": 3
},
{
"reaction": "🚀",
"users": [
"lucasjin"
],
"count": 1
}
] | 2024-08-21T13:00:07.000Z | 2024-08-21T13:00:07.023Z | [] | /posts/clem/134308950278909 | 1,622 | 0 |
920252466669962 | [
{
"type": "text",
"value": "Falcon Mamba now available now in llama.cpp !",
"raw": "Falcon Mamba now available now in llama.cpp !",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Check out GGUF files uploaded here: ",
"raw": "Check out GGUF files uploaded here: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/tiiuae/falconmamba-7b-66b9a580324dd1598b0f6d4a",
"resource": {
"type": "collection",
"id": "tiiuae/falconmamba-7b-66b9a580324dd1598b0f6d4a",
"discussionNum": null
},
"url": "https://huggingface.co/collections/tiiuae/falconmamba-7b-66b9a580324dd1598b0f6d4a",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Falcon Mamba now available in llama.cpp!
Check out GGUF files uploaded here: https://huggingface.co/collections/tiiuae/falconmamba-7b-66b9a580324dd1598b0f6d4a | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648631057413-noauth.png",
"fullname": "Younes Belkada",
"name": "ybelkada",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 415,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"John6666",
"s3nh",
"clem",
"victor",
"pcuenq"
],
"count": 5
},
{
"reaction": "❤️",
"users": [
"clem",
"patrickbdevaney",
"pcuenq"
],
"count": 3
},
{
"reaction": "🚀",
"users": [
"Felladrin",
"patrickbdevaney"
],
"count": 2
}
] | 2024-08-21T09:36:51.000Z | 2024-08-22T02:44:17.604Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg",
"fullname": "Clem 🤗",
"name": "clem",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1734,
"isFollowing": false
},
{
"avatarUrl": "/avatars/2fcd9cf7ed42f91172e47bd65a150d1b.svg",
"fullname": "Patrick Devaney",
"name": "patrickbdevaney",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 3,
"isFollowing": false
}
] | /posts/ybelkada/920252466669962 | 2,514 | 2 |
331791404657365 | [
{
"type": "text",
"value": "The Minimalist Spaces That May Be Helpful !!",
"raw": "The Minimalist Spaces That May Be Helpful !!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "GrabDoc V | GrabDoc | Type Byte | SD3 CLI",
"raw": "GrabDoc V | GrabDoc | Type Byte | SD3 CLI",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- GrabDoc V: ",
"raw": "- GrabDoc V: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/prithivMLmods/GRABDOC-V",
"resource": {
"type": "space",
"id": "prithivMLmods/GRABDOC-V",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/prithivMLmods/GRABDOC-V",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- GrabDoc: ",
"raw": "- GrabDoc: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/prithivMLmods/GRAB-DOC",
"resource": {
"type": "space",
"id": "prithivMLmods/GRAB-DOC",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/prithivMLmods/GRAB-DOC",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Type Byte: ",
"raw": "- Type Byte: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/prithivMLmods/Type-Byte",
"resource": {
"type": "space",
"id": "prithivMLmods/Type-Byte",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/prithivMLmods/Type-Byte",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- SD3 CLI: ",
"raw": "- SD3 CLI: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/prithivMLmods/SD3-CLI",
"resource": {
"type": "space",
"id": "prithivMLmods/SD3-CLI",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/prithivMLmods/SD3-CLI",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | The Minimalist Spaces That May Be Helpful !!
GrabDoc V | GrabDoc | Type Byte | SD3 CLI
- GrabDoc V: https://huggingface.co/spaces/prithivMLmods/GRABDOC-V
- GrabDoc: https://huggingface.co/spaces/prithivMLmods/GRAB-DOC
- Type Byte: https://huggingface.co/spaces/prithivMLmods/Type-Byte
- SD3 CLI: https://huggingface.co/spaces/prithivMLmods/SD3-CLI | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg",
"fullname": "Prithiv Sakthi",
"name": "prithivMLmods",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 342,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/SS1pEem2UHJSefphnuno1.mp4"
},
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/Re29MmGzlHHh9q0lhQxJm.mp4"
},
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/y3SyrWfG_Xkag6RXtCMRV.mp4"
},
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/4LYlGSE_bBImo_Nx033jC.mp4"
}
] | [] | [
{
"reaction": "👍",
"users": [
"John6666",
"victor",
"Nymbo",
"prithivMLmods",
"Ngrthm"
],
"count": 5
},
{
"reaction": "🤝",
"users": [
"Ngrthm"
],
"count": 1
}
] | 2024-08-21T08:03:33.000Z | 2024-08-24T14:50:03.375Z | [] | /posts/prithivMLmods/331791404657365 | 2,983 | 0 |
206181944104420 | [
{
"type": "text",
"value": "Woman.ru Forum Posts Dataset - ",
"raw": "Woman.ru Forum Posts Dataset - ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/nyuuzyou/womanru-posts",
"resource": {
"type": "dataset",
"id": "nyuuzyou/womanru-posts",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/nyuuzyou/womanru-posts",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "📊 Dataset highlights:",
"raw": "📊 Dataset highlights:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- 1,308,238 forum posts extracted from Woman.ru",
"raw": "- 1,308,238 forum posts extracted from Woman.ru",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Includes original posts and replies from various threads",
"raw": "- Includes original posts and replies from various threads",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Each entry contains URL, title, original post, date, and replies",
"raw": "- Each entry contains URL, title, original post, date, and replies",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Primarily in Russian language",
"raw": "- Primarily in Russian language",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Covers a wide range of topics relevant to the site's female-focused audience",
"raw": "- Covers a wide range of topics relevant to the site's female-focused audience",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Spans nearly two decades of discussions, from around 2005 to 2024",
"raw": "- Spans nearly two decades of discussions, from around 2005 to 2024",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Dedicated to public domain under Creative Commons Zero (CC0) license",
"raw": "- Dedicated to public domain under Creative Commons Zero (CC0) license",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🌐 Sourced from Woman.ru, one of the most popular women's information and entertainment portals in the Russian Internet.",
"raw": "🌐 Sourced from Woman.ru, one of the most popular women's information and entertainment portals in the Russian Internet.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Woman.ru Forum Posts Dataset - https://huggingface.co/datasets/nyuuzyou/womanru-posts
📊 Dataset highlights:
- 1,308,238 forum posts extracted from Woman.ru
- Includes original posts and replies from various threads
- Each entry contains URL, title, original post, date, and replies
- Primarily in Russian language
- Covers a wide range of topics relevant to the site's female-focused audience
- Spans nearly two decades of discussions, from around 2005 to 2024
- Dedicated to public domain under Creative Commons Zero (CC0) license
🌐 Sourced from Woman.ru, one of the most popular women's information and entertainment portals in the Russian Internet. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png",
"fullname": "nyuuzyou",
"name": "nyuuzyou",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 58,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"AlexPoto",
"motexture"
],
"count": 3
},
{
"reaction": "🚀",
"users": [
"ajibawa-2023",
"kristaller486"
],
"count": 2
}
] | 2024-08-21T01:48:40.000Z | 2024-08-21T04:55:48.510Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg",
"fullname": "Feynman Innovations",
"name": "ajibawa-2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 137,
"isFollowing": false
}
] | /posts/nyuuzyou/206181944104420 | 1,328 | 1 |
585285320628223 | [
{
"type": "text",
"value": "Cooked up a cool & much faster AI voice assistant space that also supports speech translation (with seamless-expressive). Start with the phrase \"Please translate\" followed by the speech you'd like to translate, to activate speech translation mode. Using opensource LLMs (Llama 3, Mistral etc) with edge tts for voice assistant and seamless-expressive for speech translation.",
"raw": "Cooked up a cool & much faster AI voice assistant space that also supports speech translation (with seamless-expressive). Start with the phrase \"Please translate\" followed by the speech you'd like to translate, to activate speech translation mode. Using opensource LLMs (Llama 3, Mistral etc) with edge tts for voice assistant and seamless-expressive for speech translation.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Give it a try: ",
"raw": "Give it a try: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Jaward/optimus",
"resource": {
"type": "space",
"id": "Jaward/optimus",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Jaward/optimus",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Cooked up a cool & much faster AI voice assistant space that also supports speech translation (with seamless-expressive). Start with the phrase "Please translate" followed by the speech you'd like to translate, to activate speech translation mode. Using opensource LLMs (Llama 3, Mistral etc) with edge tts for voice assistant and seamless-expressive for speech translation.
Give it a try: https://huggingface.co/spaces/Jaward/optimus
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 189,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/jUv1TwJiTqrT7dlhTqy2c.mp4"
}
] | [] | [
{
"reaction": "👍",
"users": [
"John6666",
"nibomm",
"ajibawa-2023",
"victor"
],
"count": 4
}
] | 2024-08-20T23:52:05.000Z | 2024-08-21T00:50:50.638Z | [] | /posts/Jaward/585285320628223 | 1,605 | 0 |
344076070854574 | [
{
"type": "text",
"value": "🚀 We are proud to release our latest suite of three image(s)-to-3D Gradio demos and two new papers.",
"raw": "🚀 We are proud to release our latest suite of three image(s)-to-3D Gradio demos and two new papers.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "SpaRP (Unposed sparse views to 3D): ",
"raw": "SpaRP (Unposed sparse views to 3D): ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/sudo-ai/SpaRP",
"resource": {
"type": "space",
"id": "sudo-ai/SpaRP",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/sudo-ai/SpaRP",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2408.10195",
"resource": {
"type": "paper",
"id": "2408.10195",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2408.10195",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": "SpaRP: Fast 3D Object Reconstruction and Pose Estimation from Sparse\n Views (2408.10195)"
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "MeshFormer (",
"raw": "MeshFormer (",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@minghua",
"resource": null,
"url": null,
"href": null,
"user": "minghua",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@NCJ",
"resource": null,
"url": null,
"href": null,
"user": "NCJ",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ): ",
"raw": " ): ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/sudo-ai/MeshFormer",
"resource": {
"type": "space",
"id": "sudo-ai/MeshFormer",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/sudo-ai/MeshFormer",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2408.10198",
"resource": {
"type": "paper",
"id": "2408.10198",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2408.10198",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": "MeshFormer: High-Quality Mesh Generation with 3D-Guided Reconstruction\n Model (2408.10198)"
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "MeshLRM-reproduced (",
"raw": "MeshLRM-reproduced (",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@sarahwei0210",
"resource": null,
"url": null,
"href": null,
"user": "sarahwei0210",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ): ",
"raw": " ): ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/sudo-ai/MeshLRM",
"resource": {
"type": "space",
"id": "sudo-ai/MeshLRM",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/sudo-ai/MeshLRM",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Great thanks to ",
"raw": "Great thanks to ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@angli66",
"resource": null,
"url": null,
"href": null,
"user": "angli66",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " for his many efforts in preparing these demos!",
"raw": " for his many efforts in preparing these demos!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🚀 We are proud to release our latest suite of three image(s)-to-3D Gradio demos and two new papers.
SpaRP (Unposed sparse views to 3D): https://huggingface.co/spaces/sudo-ai/SpaRP https://huggingface.co/papers/2408.10195
MeshFormer (@minghua @NCJ ): https://huggingface.co/spaces/sudo-ai/MeshFormer https://huggingface.co/papers/2408.10198
MeshLRM-reproduced (@sarahwei0210 ): https://huggingface.co/spaces/sudo-ai/MeshLRM
Great thanks to @angli66 for his many efforts in preparing these demos! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/vmXEWka_q2a4mBDNjbMuY.jpeg",
"fullname": "Chao Xu",
"name": "chaoxu",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 16,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6468a48cff18750165a5aad2/4dQCaJ8MsLxDTRCcLzx13.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6468a48cff18750165a5aad2/5WQK8-3dJP_B5TKIA2KXW.png"
}
] | [
{
"avatarUrl": "/avatars/40c767d07b26e239cb44be6e5bb5e029.svg",
"fullname": "Ang Li",
"name": "angli66",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null
},
{
"avatarUrl": "/avatars/a11741787b884cc865c1b7e9e6a7ec24.svg",
"fullname": "Minghua Liu",
"name": "minghua",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60747cbf3ea03830676542b5/wGr1Jzz520JM9nZ-UcLyb.png",
"fullname": "Chong Zeng",
"name": "NCJ",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
},
{
"avatarUrl": "/avatars/e6e24e993fc4f3e9c7026b4c34bd1a25.svg",
"fullname": "Xinyue Wei",
"name": "sarahwei0210",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null
}
] | [
{
"reaction": "🔥",
"users": [
"ajibawa-2023",
"osanseviero",
"Tonic",
"victor",
"angli66",
"CarolBonk",
"Vovancho1restored1",
"merterbak"
],
"count": 8
},
{
"reaction": "🤗",
"users": [
"chaoxu",
"John6666",
"osanseviero",
"Tonic",
"victor",
"angli66"
],
"count": 6
},
{
"reaction": "🤯",
"users": [
"Tonic",
"angli66"
],
"count": 2
}
] | 2024-08-20T23:32:44.000Z | 2024-08-20T23:38:52.970Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/vmXEWka_q2a4mBDNjbMuY.jpeg",
"fullname": "Chao Xu",
"name": "chaoxu",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 16,
"isFollowing": false
}
] | /posts/chaoxu/344076070854574 | 1,877 | 1 |
561247041952845 | [
{
"type": "text",
"value": "'Legal Dictionary GPT' is now completely trained and ready for Open Source release to the world! Trained on 10,000 rows of legal definitions, Legal Dictionary GPT is your go-to resource for everything related to the first step in understanding the law, defining it. The model is free and publicly available for anyone to use. ",
"raw": "'Legal Dictionary GPT' is now completely trained and ready for Open Source release to the world! Trained on 10,000 rows of legal definitions, Legal Dictionary GPT is your go-to resource for everything related to the first step in understanding the law, defining it. The model is free and publicly available for anyone to use. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Model Link: ",
"raw": "Model Link: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://platform.openai.com/playground/chat?preset=eCrKdaPe9cnMnyTETqWDCQAU",
"resource": null,
"url": null,
"href": "https://platform.openai.com/playground/chat?preset=eCrKdaPe9cnMnyTETqWDCQAU",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Knowledge Base Bots are internal facing as opposed to external facing LLM models, that are either fine tuned or RAG tuned, generally on systems and processes related data. ",
"raw": "Knowledge Base Bots are internal facing as opposed to external facing LLM models, that are either fine tuned or RAG tuned, generally on systems and processes related data. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Learn more about Knowledge Base Bots at our website: ",
"raw": "Learn more about Knowledge Base Bots at our website: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://knowledgebasebots.com/",
"resource": null,
"url": null,
"href": "https://knowledgebasebots.com/",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 'Legal Dictionary GPT' is now completely trained and ready for Open Source release to the world! Trained on 10,000 rows of legal definitions, Legal Dictionary GPT is your go-to resource for everything related to the first step in understanding the law, defining it. The model is free and publicly available for anyone to use.
Model Link: https://platform.openai.com/playground/chat?preset=eCrKdaPe9cnMnyTETqWDCQAU
Knowledge Base Bots are internal-facing (as opposed to external-facing) LLM models that are either fine-tuned or RAG-tuned, generally on data related to systems and processes.
Learn more about Knowledge Base Bots at our website:
https://knowledgebasebots.com/
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 148,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64274b69ba6cef0a6ebb0fd6/esSLgzu2jYFzsZVpGaTCI.webp"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"LeroyDyer",
"mmx31",
"combin8"
],
"count": 3
},
{
"reaction": "➕",
"users": [
"hellork"
],
"count": 1
}
] | 2024-08-20T18:40:59.000Z | 2024-08-20T18:41:45.788Z | [] | /posts/TuringsSolutions/561247041952845 | 1,307 | 0 |
564451165105700 | [
{
"type": "text",
"value": "🚀 Meet the new GLiNER architecture 🚀",
"raw": "🚀 Meet the new GLiNER architecture 🚀",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "GLiNER revolutionized zero-shot NER by demonstrating that lightweight encoders can achieve excellent results. We're excited to continue R&D with this spirit 🔥. Our new bi-encoder and poly-encoder architectures were developed to address the main limitations of the original GLiNER architecture and bring the following new possibilities:",
"raw": "GLiNER revolutionized zero-shot NER by demonstrating that lightweight encoders can achieve excellent results. We're excited to continue R&D with this spirit 🔥. Our new bi-encoder and poly-encoder architectures were developed to address the main limitations of the original GLiNER architecture and bring the following new possibilities:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🔹 An unlimited number of entities can be recognized at once.",
"raw": "🔹 An unlimited number of entities can be recognized at once.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🔹Faster inference when entity embeddings are preprocessed.",
"raw": "🔹Faster inference when entity embeddings are preprocessed.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🔹Better generalization to unseen entities.",
"raw": "🔹Better generalization to unseen entities.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "While the bi-encoder architecture can lack inter-label understanding, we developed a poly-encoder architecture with post-fusion. It achieves the same or even better results on many benchmarking datasets compared to the original GLiNER, while still offering the listed advantages of bi-encoders.",
"raw": "While the bi-encoder architecture can lack inter-label understanding, we developed a poly-encoder architecture with post-fusion. It achieves the same or even better results on many benchmarking datasets compared to the original GLiNER, while still offering the listed advantages of bi-encoders.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Now, it’s possible to run GLiNER with hundreds of entities much faster and more reliably. ",
"raw": "Now, it’s possible to run GLiNER with hundreds of entities much faster and more reliably. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "📌 Try the new models here:",
"raw": "📌 Try the new models here:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/knowledgator/gliner-bi-encoders-66c492ce224a51c54232657b",
"resource": {
"type": "collection",
"id": "knowledgator/gliner-bi-encoders-66c492ce224a51c54232657b",
"discussionNum": null
},
"url": "https://huggingface.co/collections/knowledgator/gliner-bi-encoders-66c492ce224a51c54232657b",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🚀 Meet the new GLiNER architecture 🚀
GLiNER revolutionized zero-shot NER by demonstrating that lightweight encoders can achieve excellent results. We're excited to continue R&D with this spirit 🔥. Our new bi-encoder and poly-encoder architectures were developed to address the main limitations of the original GLiNER architecture and bring the following new possibilities:
🔹 An unlimited number of entities can be recognized at once.
🔹Faster inference when entity embeddings are preprocessed.
🔹Better generalization to unseen entities.
While the bi-encoder architecture can lack inter-label understanding, we developed a poly-encoder architecture with post-fusion. It achieves the same or even better results on many benchmarking datasets compared to the original GLiNER, while still offering the listed advantages of bi-encoders.
Now, it’s possible to run GLiNER with hundreds of entities much faster and more reliably.
📌 Try the new models here:
https://huggingface.co/collections/knowledgator/gliner-bi-encoders-66c492ce224a51c54232657b
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1658166666371-noauth.png",
"fullname": "Stepanov",
"name": "Ihor",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 15,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-20T14:23:50.000Z | 2024-08-23T12:02:57.147Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg",
"fullname": "Feynman Innovations",
"name": "ajibawa-2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 137,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1658166666371-noauth.png",
"fullname": "Stepanov",
"name": "Ihor",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 15,
"isFollowing": false
}
] | /posts/Ihor/564451165105700 | 713 | 4 |
591397214171352 | [
{
"type": "text",
"value": "𝗚𝗼𝗼𝗴𝗹𝗲 𝗽𝗮𝗽𝗲𝗿 : 𝘀𝗰𝗮𝗹𝗶𝗻𝗴 𝘂𝗽 𝗶𝗻𝗳𝗲𝗿𝗲𝗻𝗰𝗲 𝗰𝗼𝗺𝗽𝘂𝘁𝗲 𝗯𝗲𝗮𝘁𝘀 𝟭𝟰𝘅 𝗹𝗮𝗿𝗴𝗲𝗿 𝗺𝗼𝗱𝗲𝗹𝘀 🚀",
"raw": "𝗚𝗼𝗼𝗴𝗹𝗲 𝗽𝗮𝗽𝗲𝗿 : 𝘀𝗰𝗮𝗹𝗶𝗻𝗴 𝘂𝗽 𝗶𝗻𝗳𝗲𝗿𝗲𝗻𝗰𝗲 𝗰𝗼𝗺𝗽𝘂𝘁𝗲 𝗯𝗲𝗮𝘁𝘀 𝟭𝟰𝘅 𝗹𝗮𝗿𝗴𝗲𝗿 𝗺𝗼𝗱𝗲𝗹𝘀 🚀",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Remember scaling laws? These are empirical laws that say \"the bigger your model, the better it gets\". More precisely, \"as your compute increases exponentially, loss decreases in a linear fashion\". They have wild implications, suggesting that spending 100x more training compute would make you super-LLMs. That's why companies are racing to build the biggest AI superclusters ever, and Meta bought 350k H100 GPUs, which probably cost in the order of $1B.",
"raw": "Remember scaling laws? These are empirical laws that say \"the bigger your model, the better it gets\". More precisely, \"as your compute increases exponentially, loss decreases in a linear fashion\". They have wild implications, suggesting that spending 100x more training compute would make you super-LLMs. That's why companies are racing to build the biggest AI superclusters ever, and Meta bought 350k H100 GPUs, which probably cost in the order of $1B.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "But think of this : we're building huge reasoning machines, but only ask them to do one pass through the model to get one token of the final answer : i.e., we expend a minimal effort on inference. That's like building a Caterpillar truck and making it run on a lawnmower's motor. 🚚🛵 Couldn't we optimize this? 🤔",
"raw": "But think of this : we're building huge reasoning machines, but only ask them to do one pass through the model to get one token of the final answer : i.e., we expend a minimal effort on inference. That's like building a Caterpillar truck and making it run on a lawnmower's motor. 🚚🛵 Couldn't we optimize this? 🤔",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "💡 So instead of scaling up on training by training even bigger models on many more trillions of tokens, Google researchers explored this under-explored avenue : scaling up inference compute.",
"raw": "💡 So instead of scaling up on training by training even bigger models on many more trillions of tokens, Google researchers explored this under-explored avenue : scaling up inference compute.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "They combine two methods to use more compute : either a reviser that iterated to adapt the model distribution, or generate N different completions (for instance through Beam Search) and select only the best one using an additional verifier model.",
"raw": "They combine two methods to use more compute : either a reviser that iterated to adapt the model distribution, or generate N different completions (for instance through Beam Search) and select only the best one using an additional verifier model.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "They use a Palm-2 model (released in May 23) on the MATH dataset : Palm-2 has the advantage of getting a low performance on MATH, but not zero, so that improvements will be noticeable.",
"raw": "They use a Palm-2 model (released in May 23) on the MATH dataset : Palm-2 has the advantage of getting a low performance on MATH, but not zero, so that improvements will be noticeable.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "And the results show that for the same fixed amount of inference compute:",
"raw": "And the results show that for the same fixed amount of inference compute:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "💥 a smaller model with more effort on decoding beats a x14 bigger model using naive greedy sampling.",
"raw": "💥 a smaller model with more effort on decoding beats a x14 bigger model using naive greedy sampling.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "That means that you can divide your training costs by 14 and still get the same perf for the same inference cost!",
"raw": "That means that you can divide your training costs by 14 and still get the same perf for the same inference cost!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Take that, scaling laws. Mark Zuckerberg, you're welcome, hope I can get some of these H100s.",
"raw": "Take that, scaling laws. Mark Zuckerberg, you're welcome, hope I can get some of these H100s.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Read the paper here 👉 ",
"raw": "Read the paper here 👉 ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2408.03314",
"resource": {
"type": "paper",
"id": "2408.03314",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2408.03314",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": "Scaling LLM Test-Time Compute Optimally can be More Effective than\n Scaling Model Parameters (2408.03314)"
}
] | 𝗚𝗼𝗼𝗴𝗹𝗲 𝗽𝗮𝗽𝗲𝗿 : 𝘀𝗰𝗮𝗹𝗶𝗻𝗴 𝘂𝗽 𝗶𝗻𝗳𝗲𝗿𝗲𝗻𝗰𝗲 𝗰𝗼𝗺𝗽𝘂𝘁𝗲 𝗯𝗲𝗮𝘁𝘀 𝟭𝟰𝘅 𝗹𝗮𝗿𝗴𝗲𝗿 𝗺𝗼𝗱𝗲𝗹𝘀 🚀
Remember scaling laws? These are empirical laws that say "the bigger your model, the better it gets". More precisely, "as your compute increases exponentially, loss decreases in a linear fashion". They have wild implications, suggesting that spending 100x more training compute would get you super-LLMs. That's why companies are racing to build the biggest AI superclusters ever, and Meta bought 350k H100 GPUs, which probably cost on the order of $1B.
But think of this: we're building huge reasoning machines, but we only ask them to do one pass through the model to get each token of the final answer, i.e., we expend minimal effort on inference. That's like building a Caterpillar truck and making it run on a lawnmower's motor. 🚚🛵 Couldn't we optimize this? 🤔
💡 So instead of scaling up training by training even bigger models on many more trillions of tokens, Google researchers explored this under-explored avenue: scaling up inference compute.
They combine two methods to use more compute: either using a reviser that iteratively adapts the model's distribution, or generating N different completions (for instance through Beam Search) and selecting only the best one using an additional verifier model.
They use a Palm-2 model (released in May 2023) on the MATH dataset: Palm-2 has the advantage of scoring low on MATH, but not zero, so that improvements will be noticeable.
And the results show that for the same fixed amount of inference compute:
💥 a smaller model with more effort on decoding beats a 14x bigger model using naive greedy sampling.
That means that you can divide your training costs by 14 and still get the same perf for the same inference cost!
Take that, scaling laws. Mark Zuckerberg, you're welcome, hope I can get some of these H100s.
Read the paper here 👉 https://huggingface.co/papers/2408.03314 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg",
"fullname": "Aymeric Roucher",
"name": "m-ric",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 476,
"isFollowing": false
} | [] | [] | [
{
"reaction": "➕",
"users": [
"deundido",
"victor",
"todaybite",
"rreed-pha",
"lehoanganh298",
"ZeroWw",
"prithivMLmods",
"John6666",
"Joseph717171",
"inflatebot",
"diwank",
"aheuillet",
"nbroad",
"MoritzLaurer"
],
"count": 14
},
{
"reaction": "🔥",
"users": [
"ajibawa-2023",
"osanseviero",
"GPT007",
"inflatebot",
"diwank"
],
"count": 5
},
{
"reaction": "🤯",
"users": [
"inflatebot"
],
"count": 1
}
] | 2024-08-20T12:26:40.000Z | 2024-08-21T05:26:04.050Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg",
"fullname": "Feynman Innovations",
"name": "ajibawa-2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 137,
"isFollowing": false
}
] | /posts/m-ric/591397214171352 | 3,381 | 1 |
172172945948569 | [
{
"type": "text",
"value": "Just added the newly released xGen-MM v1.5 foundational Large Multimodal Models (LMMs) developed by Salesforce AI Research to my xGen-MM HF Space ",
"raw": "Just added the newly released xGen-MM v1.5 foundational Large Multimodal Models (LMMs) developed by Salesforce AI Research to my xGen-MM HF Space ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/maxiw/XGen-MM",
"resource": {
"type": "space",
"id": "maxiw/XGen-MM",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/maxiw/XGen-MM",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Just added the newly released xGen-MM v1.5 foundational Large Multimodal Models (LMMs) developed by Salesforce AI Research to my xGen-MM HF Space https://huggingface.co/spaces/maxiw/XGen-MM | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6313a26b2c7ffdd9f50187ed/MTBOHg2bMcuOMWFLCZ86L.png",
"fullname": "Maxi",
"name": "maxiw",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 48,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"Sri-Vigneshwar-DJ",
"osanseviero",
"victor",
"mrdbourke",
"John6666",
"alielfilali01"
],
"count": 6
}
] | 2024-08-20T08:24:51.000Z | 2024-08-20T22:52:10.501Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659922312540-610a70f35a40a8bfebfbf09b.jpeg",
"fullname": "Daniel Bourke",
"name": "mrdbourke",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 73,
"isFollowing": false
}
] | /posts/maxiw/172172945948569 | 2,268 | 2 |
520926551147150 | [
{
"type": "text",
"value": "🚀 We will be generating a preference dataset for DPO/ORPO and cleaning it with AI feedback during our upcoming meetup!",
"raw": "🚀 We will be generating a preference dataset for DPO/ORPO and cleaning it with AI feedback during our upcoming meetup!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "In this session, we'll walk you through the essentials of building a distilabel pipeline by exploring two key use cases: cleaning an existing dataset and generating a preference dataset for DPO/ORPO. You’ll also learn how to make the most of AI feedback, integrating Argilla to gather human feedback and improve the overall data quality. ",
"raw": "In this session, we'll walk you through the essentials of building a distilabel pipeline by exploring two key use cases: cleaning an existing dataset and generating a preference dataset for DPO/ORPO. You’ll also learn how to make the most of AI feedback, integrating Argilla to gather human feedback and improve the overall data quality. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "This session is perfect for you",
"raw": "This session is perfect for you",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- if you’re getting started with distilabel or synthetic data",
"raw": "- if you’re getting started with distilabel or synthetic data",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- if you want to learn how to use LLM inference endpoints for **free**",
"raw": "- if you want to learn how to use LLM inference endpoints for **free**",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- if you want to discover new functionalities",
"raw": "- if you want to discover new functionalities",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- if you want to provide us with new feedback",
"raw": "- if you want to provide us with new feedback",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Sign up here: ",
"raw": "Sign up here: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://lu.ma/dt0c7jru",
"resource": null,
"url": null,
"href": "https://lu.ma/dt0c7jru",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🚀 We will be generating a preference dataset for DPO/ORPO and cleaning it with AI feedback during our upcoming meetup!
In this session, we'll walk you through the essentials of building a distilabel pipeline by exploring two key use cases: cleaning an existing dataset and generating a preference dataset for DPO/ORPO. You’ll also learn how to make the most of AI feedback, integrating Argilla to gather human feedback and improve the overall data quality.
This session is perfect for you:
- if you’re getting started with distilabel or synthetic data
- if you want to learn how to use LLM inference endpoints for **free**
- if you want to discover new functionalities
- if you want to provide us with new feedback
Sign up here: https://lu.ma/dt0c7jru | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg",
"fullname": "David Berenstein",
"name": "davidberenstein1957",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 148,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"davidberenstein1957",
"osanseviero",
"plaguss",
"not-lain",
"sdiazlor",
"victor",
"legolasyiu",
"burtenshaw"
],
"count": 8
},
{
"reaction": "🚀",
"users": [
"davidberenstein1957",
"not-lain",
"BramVanroy",
"victor",
"NickyNicky"
],
"count": 5
},
{
"reaction": "🧠",
"users": [
"not-lain",
"John6666",
"davidberenstein1957",
"burtenshaw",
"Saugatkafley"
],
"count": 5
}
] | 2024-08-20T07:56:00.000Z | 2024-08-20T07:56:00.256Z | [] | /posts/davidberenstein1957/520926551147150 | 2,990 | 0 |
820845806365894 | [
{
"type": "text",
"value": "ChatFrame-Persian is first expert Persian language model in Iran.",
"raw": "ChatFrame-Persian is first expert Persian language model in Iran.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | ChatFrame-Persian is the first expert Persian language model in Iran. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/653c2bc15e4f2c3e884b6743/qtq45xiTNk8GrPL0Irbar.jpeg",
"fullname": "AIEXPLORE",
"name": "explorewithai",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 7,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"cuhawku1",
"explorewithai",
"victor",
"Tonic",
"prithivMLmods"
],
"count": 5
},
{
"reaction": "🚀",
"users": [
"explorewithai"
],
"count": 1
},
{
"reaction": "🧠",
"users": [
"explorewithai"
],
"count": 1
}
] | 2024-08-19T22:09:00.000Z | 2024-08-19T22:09:00.213Z | [] | /posts/explorewithai/820845806365894 | 1,988 | 0 |
265308668840940 | [
{
"type": "text",
"value": "🚀 How The Washington Post Uses AI to Empower Journalists 🔍📰",
"raw": "🚀 How The Washington Post Uses AI to Empower Journalists 🔍📰",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "An exciting new example in the world of AI-assisted journalism! The Post has developed an internal tool called \"Hayatacker\" that's enhancing in-depth reporting. Here's why it matters:",
"raw": "An exciting new example in the world of AI-assisted journalism! The Post has developed an internal tool called \"Hayatacker\" that's enhancing in-depth reporting. Here's why it matters:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🎥 What it does:",
"raw": "🎥 What it does:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "• Extracts stills from video files",
"raw": "• Extracts stills from video files",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "• Processes on-screen text",
"raw": "• Processes on-screen text",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "• Labels objects in images",
"raw": "• Labels objects in images",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🗳️ First big project:",
"raw": "🗳️ First big project:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Analyzed 745 Republican campaign ads on immigration (Jan-Jun 2024)",
"raw": "Analyzed 745 Republican campaign ads on immigration (Jan-Jun 2024)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🤝 Human-AI collaboration:",
"raw": "🤝 Human-AI collaboration:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "• AI extracts and organizes data",
"raw": "• AI extracts and organizes data",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "• Reporters verify and analyze findings",
"raw": "• Reporters verify and analyze findings",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🔎 Thorough approach:",
"raw": "🔎 Thorough approach:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "• Manual review of all 745 ads",
"raw": "• Manual review of all 745 ads",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "• Reverse image searches when context is lacking",
"raw": "• Reverse image searches when context is lacking",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "• Cross-referencing with AdImpact transcripts",
"raw": "• Cross-referencing with AdImpact transcripts",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "💡 Key insight from WaPo's Senior Editor for AI strategy Phoebe Connelly:",
"raw": "💡 Key insight from WaPo's Senior Editor for AI strategy Phoebe Connelly:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "\"The more exciting choice is putting AI in the hands of reporters early on in the process.\"",
"raw": "\"The more exciting choice is putting AI in the hands of reporters early on in the process.\"",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "This tool showcases how AI can augment journalistic capabilities without replacing human insight and verification. It's a powerful example of technology enhancing, not replacing, traditional reporting skills.",
"raw": "This tool showcases how AI can augment journalistic capabilities without replacing human insight and verification. It's a powerful example of technology enhancing, not replacing, traditional reporting skills.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "👉 Read the full article and the methodology: ",
"raw": "👉 Read the full article and the methodology: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.washingtonpost.com/elections/interactive/2024/republican-campaign-ads-immigration-border-security/",
"resource": null,
"url": null,
"href": "https://www.washingtonpost.com/elections/interactive/2024/republican-campaign-ads-immigration-border-security/",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🚀 How The Washington Post Uses AI to Empower Journalists 🔍📰
An exciting new example in the world of AI-assisted journalism! The Post has developed an internal tool called "Hayatacker" that's enhancing in-depth reporting. Here's why it matters:
🎥 What it does:
• Extracts stills from video files
• Processes on-screen text
• Labels objects in images
🗳️ First big project:
Analyzed 745 Republican campaign ads on immigration (Jan-Jun 2024)
🤝 Human-AI collaboration:
• AI extracts and organizes data
• Reporters verify and analyze findings
🔎 Thorough approach:
• Manual review of all 745 ads
• Reverse image searches when context is lacking
• Cross-referencing with AdImpact transcripts
💡 Key insight from WaPo's Senior Editor for AI strategy Phoebe Connelly:
"The more exciting choice is putting AI in the hands of reporters early on in the process."
This tool showcases how AI can augment journalistic capabilities without replacing human insight and verification. It's a powerful example of technology enhancing, not replacing, traditional reporting skills.
👉 Read the full article and the methodology: https://www.washingtonpost.com/elections/interactive/2024/republican-campaign-ads-immigration-border-security/ | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 364,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"YannisTevissen",
"OWNRWallet",
"Smooke",
"jkohnbryan78",
"MefhigosetH",
"victor",
"adamelliotfields",
"cschroeder",
"AlkaouDembele",
"Nelathan"
],
"count": 10
},
{
"reaction": "👍",
"users": [
"ajibawa-2023"
],
"count": 1
},
{
"reaction": "🧠",
"users": [
"louisbrulenaudet"
],
"count": 1
}
] | 2024-08-19T17:03:18.000Z | 2024-08-19T22:48:04.806Z | [] | /posts/fdaudens/265308668840940 | 2,898 | 1 |
688861054321368 | [
{
"type": "text",
"value": "🤗 Serving Meta Llama 3.1 405B on Google Cloud is now possible via the Hugging Face Deep Learning Containers (DLCs) for Text Generation Inference (TGI)",
"raw": "🤗 Serving Meta Llama 3.1 405B on Google Cloud is now possible via the Hugging Face Deep Learning Containers (DLCs) for Text Generation Inference (TGI)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "In this post, we showcase how to deploy ",
"raw": "In this post, we showcase how to deploy ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
"resource": null,
"url": null,
"href": "https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " on an A3 instance with 8 x H100 GPUs on Vertex AI",
"raw": " on an A3 instance with 8 x H100 GPUs on Vertex AI",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Thanks to the Hugging Face DLCs for TGI and Google Cloud Vertex AI, deploying a high-performance text generation container for serving Large Language Models (LLMs) has never been easier. And we’re not going to stop here – stay tuned as we enable more experiences to build AI with open models on Google Cloud!",
"raw": "Thanks to the Hugging Face DLCs for TGI and Google Cloud Vertex AI, deploying a high-performance text generation container for serving Large Language Models (LLMs) has never been easier. And we’re not going to stop here – stay tuned as we enable more experiences to build AI with open models on Google Cloud!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Read the full post at ",
"raw": "Read the full post at ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/llama31-on-vertex-ai",
"resource": null,
"url": null,
"href": "https://huggingface.co/blog/llama31-on-vertex-ai",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🤗 Serving Meta Llama 3.1 405B on Google Cloud is now possible via the Hugging Face Deep Learning Containers (DLCs) for Text Generation Inference (TGI)
In this post, we showcase how to deploy https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct-FP8 on an A3 instance with 8 x H100 GPUs on Vertex AI
Thanks to the Hugging Face DLCs for TGI and Google Cloud Vertex AI, deploying a high-performance text generation container for serving Large Language Models (LLMs) has never been easier. And we’re not going to stop here – stay tuned as we enable more experiences to build AI with open models on Google Cloud!
Read the full post at https://huggingface.co/blog/llama31-on-vertex-ai | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60f0608166e5701b80ed3f02/BHso-wSWpR9b8b8CKvodC.jpeg",
"fullname": "Alvaro Bartolome",
"name": "alvarobartt",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 1724,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"maximuspowers",
"OWNRWallet",
"gabrielmbmb",
"not-lain",
"behindmask",
"pagezyhf",
"troelsfr"
],
"count": 7
},
{
"reaction": "👍",
"users": [
"whitebill",
"gabrielmbmb",
"ajibawa-2023",
"behindmask"
],
"count": 4
}
] | 2024-08-19T15:19:58.000Z | 2024-08-19T15:19:58.391Z | [] | /posts/alvarobartt/688861054321368 | 2,702 | 0 |
253240912294015 | [
{
"type": "mention",
"value": null,
"raw": "@Blane187",
"resource": null,
"url": null,
"href": null,
"user": "Blane187",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " could you please modify the title of your blogpost? content is cool, title could be nicer imo ",
"raw": " could you please modify the title of your blogpost? content is cool, title could be nicer imo ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/Blane187/wtf-is-rvc",
"resource": null,
"url": null,
"href": "https://huggingface.co/blog/Blane187/wtf-is-rvc",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | @Blane187 could you please modify the title of your blogpost? content is cool, title could be nicer imo https://huggingface.co/blog/Blane187/wtf-is-rvc | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6340651b388c3fa40f9a5bc0/av1C4_S7bHGxAzOu8lOmG.jpeg",
"fullname": "Adam Molnar",
"name": "lunarflu",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 334,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65e2f1cb4dbf9514fb475b48/0EwhfSfMCy8P2e7nJWaOO.jpeg",
"fullname": "Rico Ardiansyah",
"name": "Blane187",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 30
}
] | [
{
"reaction": "👀",
"users": [
"victor"
],
"count": 1
}
] | 2024-08-19T12:07:33.000Z | 2024-08-19T14:03:14.441Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65e2f1cb4dbf9514fb475b48/0EwhfSfMCy8P2e7nJWaOO.jpeg",
"fullname": "Rico Ardiansyah",
"name": "Blane187",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 30,
"isFollowing": false
}
] | /posts/lunarflu/253240912294015 | 1,033 | 3 |
261629960253144 | [
{
"type": "text",
"value": "🚀 Introducing Hugging Face Similar: a Chrome extension to find relevant datasets!",
"raw": "🚀 Introducing Hugging Face Similar: a Chrome extension to find relevant datasets!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "✨ Adds a \"Similar Datasets\" section to Hugging Face dataset pages",
"raw": "✨ Adds a \"Similar Datasets\" section to Hugging Face dataset pages",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🔍 Recommendations based on dataset READMEs",
"raw": "🔍 Recommendations based on dataset READMEs",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🏗️ Powered by ",
"raw": "🏗️ Powered by ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/chromadb",
"resource": null,
"url": null,
"href": "https://huggingface.co/chromadb",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " and ",
"raw": " and ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/Snowflake",
"resource": null,
"url": null,
"href": "https://huggingface.co/Snowflake",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " embeddings. ",
"raw": " embeddings. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "You can try it here: ",
"raw": "You can try it here: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://chromewebstore.google.com/detail/hugging-face-similar/aijelnjllajooinkcpkpbhckbghghpnl?authuser=0&hl=en",
"resource": null,
"url": null,
"href": "https://chromewebstore.google.com/detail/hugging-face-similar/aijelnjllajooinkcpkpbhckbghghpnl?authuser=0&hl=en",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I am very happy to get feedback on whether this could be useful or not 🤗",
"raw": "I am very happy to get feedback on whether this could be useful or not 🤗",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🚀 Introducing Hugging Face Similar: a Chrome extension to find relevant datasets!
✨ Adds a "Similar Datasets" section to Hugging Face dataset pages
🔍 Recommendations based on dataset READMEs
🏗️ Powered by https://huggingface.co/chromadb and https://huggingface.co/Snowflake embeddings.
You can try it here: https://chromewebstore.google.com/detail/hugging-face-similar/aijelnjllajooinkcpkpbhckbghghpnl?authuser=0&hl=en.
I am very happy to get feedback on whether this could be useful or not 🤗 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg",
"fullname": "Daniel van Strien",
"name": "davanstrien",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 404,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🤗",
"users": [
"prithivMLmods",
"John6666",
"ajibawa-2023",
"lunarflu",
"osanseviero",
"victor",
"Aarifkhan",
"Nymbo"
],
"count": 8
},
{
"reaction": "🚀",
"users": [
"asoria",
"osanseviero"
],
"count": 2
},
{
"reaction": "🧠",
"users": [
"maximuspowers"
],
"count": 1
}
] | 2024-08-19T08:33:10.000Z | 2024-08-20T16:45:14.112Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg",
"fullname": "Feynman Innovations",
"name": "ajibawa-2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 137,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63de560a15266dd945f209ca/PeZf3IF-x7Qh8OcnKH12R.png",
"fullname": "MrDragonFox",
"name": "MrDragonFox",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 11,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg",
"fullname": "Daniel van Strien",
"name": "davanstrien",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 404,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6686f5bd840ee769597d9e3e/zQyyDh-ExyFtBBUdEZLC_.png",
"fullname": "John Smith",
"name": "JohnS73123",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/davanstrien/261629960253144 | 3,151 | 4 |
519604948233675 | [
{
"type": "text",
"value": "ResShift 1-Click Windows, RunPod, Massed Compute, Kaggle Installers with Amazing Gradio APP and Batch Image Processing. ResShift is Efficient Diffusion Model for Image Super-resolution by Residual Shifting (NeurIPS 2023, Spotlight).",
"raw": "ResShift 1-Click Windows, RunPod, Massed Compute, Kaggle Installers with Amazing Gradio APP and Batch Image Processing. ResShift is Efficient Diffusion Model for Image Super-resolution by Residual Shifting (NeurIPS 2023, Spotlight).",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Official Repo : ",
"raw": "Official Repo : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/zsyOAOA/ResShift",
"resource": null,
"url": null,
"href": "https://github.com/zsyOAOA/ResShift",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I have developed a very advanced Gradio APP.",
"raw": "I have developed a very advanced Gradio APP.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Developed APP Scripts and Installers : ",
"raw": "Developed APP Scripts and Installers : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://www.patreon.com/posts/110331752",
"resource": null,
"url": null,
"href": "https://www.patreon.com/posts/110331752",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Features",
"raw": "Features",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "It supports following tasks:",
"raw": "It supports following tasks:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Real-world image super-resolution",
"raw": "Real-world image super-resolution",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Bicubic (resize by Matlab) image super-resolution",
"raw": "Bicubic (resize by Matlab) image super-resolution",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Blind Face Restoration",
"raw": "Blind Face Restoration",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Automatically saving all generated image with same name + numbering if necessary",
"raw": "Automatically saving all generated image with same name + numbering if necessary",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Randomize seed feature for each generation",
"raw": "Randomize seed feature for each generation",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Batch image processing - give input and output folder paths and it batch process all images and saves",
"raw": "Batch image processing - give input and output folder paths and it batch process all images and saves",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "1-Click to install on Windows, RunPod, Massed Compute and Kaggle (free account)",
"raw": "1-Click to install on Windows, RunPod, Massed Compute and Kaggle (free account)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Windows Requirements",
"raw": "Windows Requirements",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Python 3.10, FFmpeg, Cuda 11.8, C++ tools and Git",
"raw": "Python 3.10, FFmpeg, Cuda 11.8, C++ tools and Git",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "If it doesn't work make sure to below tutorial and install everything exactly as shown in this below tutorial",
"raw": "If it doesn't work make sure to below tutorial and install everything exactly as shown in this below tutorial",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://youtu.be/-NjNy7afOQ0",
"resource": null,
"url": null,
"href": "https://youtu.be/-NjNy7afOQ0",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "How to Install on Windows",
"raw": "How to Install on Windows",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Make sure that you have the above requirements",
"raw": "Make sure that you have the above requirements",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Extract files into a folder like c:/reshift_v1",
"raw": "Extract files into a folder like c:/reshift_v1",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Double click Windows_Install.bat and it will automatically install everything for you with an isolated virtual environment folder (VENV)",
"raw": "Double click Windows_Install.bat and it will automatically install everything for you with an isolated virtual environment folder (VENV)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "After that double click Windows_Start_app.bat and start the app",
"raw": "After that double click Windows_Start_app.bat and start the app",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "When you first time use a task it will download necessary models (all under 500 MB) into accurate folders",
"raw": "When you first time use a task it will download necessary models (all under 500 MB) into accurate folders",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "If during download it fails, file gets corrupted sadly it doesn't verify that so delete files inside weights and restart",
"raw": "If during download it fails, file gets corrupted sadly it doesn't verify that so delete files inside weights and restart",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "How to Install on RunPod, Massed Compute, Kaggle",
"raw": "How to Install on RunPod, Massed Compute, Kaggle",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Follow the Massed_Compute_Instructions_READ.txt and Runpod_Instructions_READ.txt",
"raw": "Follow the Massed_Compute_Instructions_READ.txt and Runpod_Instructions_READ.txt",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "For Kaggle follow the notebook written steps",
"raw": "For Kaggle follow the notebook written steps",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "An example video of how to use my RunPod, Massed Compute scripts and Kaggle notebook can be seen",
"raw": "An example video of how to use my RunPod, Massed Compute scripts and Kaggle notebook can be seen",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://youtu.be/wG7oPp01COg",
"resource": null,
"url": null,
"href": "https://youtu.be/wG7oPp01COg",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | ResShift 1-Click Windows, RunPod, Massed Compute, Kaggle Installers with Amazing Gradio APP and Batch Image Processing. ResShift is Efficient Diffusion Model for Image Super-resolution by Residual Shifting (NeurIPS 2023, Spotlight).
Official Repo : https://github.com/zsyOAOA/ResShift
I have developed a very advanced Gradio APP.
Developed APP Scripts and Installers : https://www.patreon.com/posts/110331752
Features
It supports following tasks:
Real-world image super-resolution
Bicubic (resize by Matlab) image super-resolution
Blind Face Restoration
Automatically saving all generated image with same name + numbering if necessary
Randomize seed feature for each generation
Batch image processing - give input and output folder paths and it batch process all images and saves
1-Click to install on Windows, RunPod, Massed Compute and Kaggle (free account)
Windows Requirements
Python 3.10, FFmpeg, Cuda 11.8, C++ tools and Git
If it doesn't work, make sure to watch the tutorial below and install everything exactly as shown in it
https://youtu.be/-NjNy7afOQ0
How to Install on Windows
Make sure that you have the above requirements
Extract files into a folder like c:/reshift_v1
Double-click Windows_Install.bat and it will automatically install everything for you into an isolated virtual environment folder (VENV)
After that, double-click Windows_Start_app.bat to start the app
The first time you use a task, it will download the necessary models (all under 500 MB) into the appropriate folders
If a download fails, the file may be corrupted; unfortunately the app doesn't verify this, so delete the files inside the weights folder and restart
How to Install on RunPod, Massed Compute, Kaggle
Follow the Massed_Compute_Instructions_READ.txt and Runpod_Instructions_READ.txt
For Kaggle, follow the steps written in the notebook
An example video of how to use my RunPod and Massed Compute scripts and the Kaggle notebook can be seen below
https://youtu.be/wG7oPp01COg
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png",
"fullname": "Furkan Gözükara",
"name": "MonsterMMORPG",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 368,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/K7p-mZHsz0BrVH0_DyfDa.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/69NNZ_fhvrAJ5skoJnpIb.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/FB0M3XZ0L8stSLd7Es9un.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/JEIpjchlx3sMYkX3zTQk8.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/jSjjmy81ctwFL9PH5UMZk.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/kMcMQS6N3dHaRN3utSxRI.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/n3a0cLYAJKAKR0vz3sB-Z.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/bGJsm0Q0mbmmWcpfjbgao.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/YEvOvnrGu7G0IuMf2qjha.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"BhatiaDinesh"
],
"count": 1
},
{
"reaction": "🚀",
"users": [
"BhatiaDinesh"
],
"count": 1
},
{
"reaction": "➕",
"users": [
"ZeroWw"
],
"count": 1
}
] | 2024-08-19T01:20:07.000Z | 2024-08-22T23:24:27.627Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/no-auth/bpf5a9vcDOwcR3n34BEde.png",
"fullname": "Dinesh Bhatia",
"name": "BhatiaDinesh",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png",
"fullname": "Furkan Gözükara",
"name": "MonsterMMORPG",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 368,
"isFollowing": false
},
{
"avatarUrl": "/avatars/54483699273ac58a4a6fe1fa4aab65fe.svg",
"fullname": "Robert Sinclair",
"name": "ZeroWw",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 75,
"isFollowing": false
}
] | /posts/MonsterMMORPG/519604948233675 | 1,013 | 4 |
587862319331275 | [
{
"type": "text",
"value": "Put together a small repo showing how to go from making your own fine-tuning dataset w/ services like Groq & Together to publishing that model on ollama.",
"raw": "Put together a small repo showing how to go from making your own fine-tuning dataset w/ services like Groq & Together to publishing that model on ollama.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "In my case I fine-tuned SmolLM-360M to be a better assistant for my Pi-Card (previous post) project. ",
"raw": "In my case I fine-tuned SmolLM-360M to be a better assistant for my Pi-Card (previous post) project. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Check it out!",
"raw": "Check it out!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/nkasmanoff/ft-flow",
"resource": null,
"url": null,
"href": "https://github.com/nkasmanoff/ft-flow",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Put together a small repo showing how to go from making your own fine-tuning dataset w/ services like Groq & Together to publishing that model on ollama.
In my case I fine-tuned SmolLM-360M to be a better assistant for my Pi-Card (previous post) project.
Check it out!
https://github.com/nkasmanoff/ft-flow | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60d3850107da9c17c7270912/WzhEbEvjunrDJ2IpdOxtZ.png",
"fullname": "Noah Kasmanoff",
"name": "nkasmanoff",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 13,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"victor",
"lunarflu",
"John6666",
"Bruhn",
"ZeroWw"
],
"count": 5
}
] | 2024-08-18T18:41:50.000Z | 2024-08-18T18:41:50.449Z | [] | /posts/nkasmanoff/587862319331275 | 1,820 | 0 |
945909094632271 | [
{
"type": "text",
"value": "🔔 Release: small-text v1.4.1",
"raw": "🔔 Release: small-text v1.4.1",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The new release contains some smaller bugfixes. Check it out!",
"raw": "The new release contains some smaller bugfixes. Check it out!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Github: ",
"raw": "Github: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/webis-de/small-text",
"resource": null,
"url": null,
"href": "https://github.com/webis-de/small-text",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2107.10314",
"resource": {
"type": "paper",
"id": "2107.10314",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2107.10314",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": "Small-Text: Active Learning for Text Classification in Python (2107.10314)"
}
] | 🔔 Release: small-text v1.4.1
The new release contains some smaller bugfixes. Check it out!
Github: https://github.com/webis-de/small-text
Paper: https://huggingface.co/papers/2107.10314 | {
"avatarUrl": "/avatars/f32291df2054c1bb4a01889d1b41c0d5.svg",
"fullname": "Christopher Schröder",
"name": "cschroeder",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 17,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"victor"
],
"count": 2
}
] | 2024-08-18T16:10:34.000Z | 2024-08-18T16:10:34.980Z | [] | /posts/cschroeder/945909094632271 | 1,881 | 0 |
786964208614949 | [
{
"type": "text",
"value": "🚀 We’re excited to launch Ghost 8B Beta (1608), a top-performing language model with unmatched multilingual support and cost efficiency.",
"raw": "🚀 We’re excited to launch Ghost 8B Beta (1608), a top-performing language model with unmatched multilingual support and cost efficiency.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Key Highlights:",
"raw": "Key Highlights:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Superior Performance: Outperforms Llama 3.1 8B Instruct, GPT-3.5 Turbo, Claude 3 Opus, GPT-4, and more in winrate scores.",
"raw": "- Superior Performance: Outperforms Llama 3.1 8B Instruct, GPT-3.5 Turbo, Claude 3 Opus, GPT-4, and more in winrate scores.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Expanded Language Support: Now supports 16 languages, including English, Vietnamese, Spanish, Chinese, and more.",
"raw": "- Expanded Language Support: Now supports 16 languages, including English, Vietnamese, Spanish, Chinese, and more.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "- Enhanced Capabilities: Improved math, reasoning, and instruction-following for better task handling.",
"raw": "- Enhanced Capabilities: Improved math, reasoning, and instruction-following for better task handling.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "With two context options (8k and 128k), Ghost 8B Beta is perfect for complex, multilingual applications, balancing power and cost-effectiveness.",
"raw": "With two context options (8k and 128k), Ghost 8B Beta is perfect for complex, multilingual applications, balancing power and cost-effectiveness.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🔗 Learn More: ",
"raw": "🔗 Learn More: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://ghost-x.org/docs/models/ghost-8b-beta",
"resource": null,
"url": null,
"href": "https://ghost-x.org/docs/models/ghost-8b-beta",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/ghost-x/ghost-8b-beta-668ead6179f93be717db4542",
"resource": {
"type": "collection",
"id": "ghost-x/ghost-8b-beta-668ead6179f93be717db4542",
"discussionNum": null
},
"url": "https://huggingface.co/collections/ghost-x/ghost-8b-beta-668ead6179f93be717db4542",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | 🚀 We’re excited to launch Ghost 8B Beta (1608), a top-performing language model with unmatched multilingual support and cost efficiency.
Key Highlights:
- Superior Performance: Outperforms Llama 3.1 8B Instruct, GPT-3.5 Turbo, Claude 3 Opus, GPT-4, and more in winrate scores.
- Expanded Language Support: Now supports 16 languages, including English, Vietnamese, Spanish, Chinese, and more.
- Enhanced Capabilities: Improved math, reasoning, and instruction-following for better task handling.
With two context options (8k and 128k), Ghost 8B Beta is perfect for complex, multilingual applications, balancing power and cost-effectiveness.
🔗 Learn More: https://ghost-x.org/docs/models/ghost-8b-beta
https://huggingface.co/collections/ghost-x/ghost-8b-beta-668ead6179f93be717db4542 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/600ae38cc92b79f54efd4556/cSqRIslYl5L3I4WK3a31f.png",
"fullname": "Hieu Lam",
"name": "lamhieu",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 74,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"John6666",
"victor",
"brainhome",
"ajibawa-2023",
"osanseviero",
"Annu72772",
"nbroad",
"Joseph717171",
"ZeroWw",
"KingNish",
"Rybens"
],
"count": 11
},
{
"reaction": "👀",
"users": [
"victor",
"stylooantlabs",
"nbroad",
"Joseph717171",
"ZeroWw"
],
"count": 5
},
{
"reaction": "➕",
"users": [
"ZeroWw"
],
"count": 1
}
] | 2024-08-18T12:08:00.000Z | 2024-08-18T12:08:34.718Z | [] | /posts/lamhieu/786964208614949 | 3,188 | 0 |
541899628113938 | [
{
"type": "text",
"value": "Wrote a blog post with some ideas about prompt engineering ",
"raw": "Wrote a blog post with some ideas about prompt engineering ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/KnutJaegersberg/first-principles-prompt-engineering",
"resource": null,
"url": null,
"href": "https://huggingface.co/blog/KnutJaegersberg/first-principles-prompt-engineering",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Wrote a blog post with some ideas about prompt engineering
https://huggingface.co/blog/KnutJaegersberg/first-principles-prompt-engineering | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669551186189-63732ebbbd81fae2b3aaf3fb.jpeg",
"fullname": "Knut Jägersberg",
"name": "KnutJaegersberg",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 238,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🤗",
"users": [
"prithivMLmods",
"John6666",
"victor",
"osanseviero",
"appvoid"
],
"count": 5
}
] | 2024-08-18T10:02:22.000Z | 2024-08-18T10:02:22.018Z | [] | /posts/KnutJaegersberg/541899628113938 | 2,193 | 0 |
710847636717551 | [
{
"type": "text",
"value": "I'm excited to share a really cool milestone in my AI/LLM journey.",
"raw": "I'm excited to share a really cool milestone in my AI/LLM journey.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Brief backstory: Before diving into AI, I spent over a decade working in ecological fields such as the conservation corps, biodynamic farming, and natural habitat restoration. This background instilled in me a deep concern about the environmental impact of scaling AI without sustainable practices.",
"raw": "Brief backstory: Before diving into AI, I spent over a decade working in ecological fields such as the conservation corps, biodynamic farming, and natural habitat restoration. This background instilled in me a deep concern about the environmental impact of scaling AI without sustainable practices.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Driven by this concern, I've spent months planning and experimenting to make my AI work more eco-friendly. I'm thrilled to announce that I've successfully transitioned my entire operation to run on 100% sustainable solar power!",
"raw": "Driven by this concern, I've spent months planning and experimenting to make my AI work more eco-friendly. I'm thrilled to announce that I've successfully transitioned my entire operation to run on 100% sustainable solar power!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "My current setup includes multiple linked Mac Pro tower desktops and custom code built from open-source libraries. While it's a bit experimental, this configuration is working great for my needs. All my LLM research, development, and client services now run exclusively on solar energy.",
"raw": "My current setup includes multiple linked Mac Pro tower desktops and custom code built from open-source libraries. While it's a bit experimental, this configuration is working great for my needs. All my LLM research, development, and client services now run exclusively on solar energy.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I'm curious if anyone else here has experimented with renewable energy for their LLM work?",
"raw": "I'm curious if anyone else here has experimented with renewable energy for their LLM work?",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "For those interested in more details, I've written a brief blog post about this journey here ",
"raw": "For those interested in more details, I've written a brief blog post about this journey here ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://medium.com/@betalabsllm/powering-the-future-be-ta-labs-revolutionary-100-solar-powered-ai-operation-444433e61d43",
"resource": null,
"url": null,
"href": "https://medium.com/@betalabsllm/powering-the-future-be-ta-labs-revolutionary-100-solar-powered-ai-operation-444433e61d43",
"user": null,
"lang": null,
"code": null,
"label": null
}
] | I'm excited to share a really cool milestone in my AI/LLM journey.
Brief backstory: Before diving into AI, I spent over a decade working in ecological fields such as the conservation corps, biodynamic farming, and natural habitat restoration. This background instilled in me a deep concern about the environmental impact of scaling AI without sustainable practices.
Driven by this concern, I've spent months planning and experimenting to make my AI work more eco-friendly. I'm thrilled to announce that I've successfully transitioned my entire operation to run on 100% sustainable solar power!
My current setup includes multiple linked Mac Pro tower desktops and custom code built from open-source libraries. While it's a bit experimental, this configuration is working great for my needs. All my LLM research, development, and client services now run exclusively on solar energy.
I'm curious if anyone else here has experimented with renewable energy for their LLM work?
For those interested in more details, I've written a brief blog post about this journey here https://medium.com/@betalabsllm/powering-the-future-be-ta-labs-revolutionary-100-solar-powered-ai-operation-444433e61d43 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64740cf7485a7c8e1bd51ac9/CXZCJm2x4ToT83pEIYyQR.png",
"fullname": "Beckett Dillon",
"name": "Severian",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 175,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"victor",
"ajibawa-2023",
"osanseviero",
"shawon",
"Olafangensan",
"Bruhn",
"John6666",
"vipulmaheshwari",
"kumail1"
],
"count": 9
}
] | 2024-08-18T09:00:52.000Z | 2024-08-19T09:55:47.551Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg",
"fullname": "Feynman Innovations",
"name": "ajibawa-2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 137,
"isFollowing": false
}
] | /posts/Severian/710847636717551 | 1,926 | 1 |
724160481483785 | [
{
"type": "text",
"value": "How good are you at spotting AI-generated images? ",
"raw": "How good are you at spotting AI-generated images? ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Find out by playing Fake Insects 🐞 a Game where you need to identify which insects are fake (AI generated). Good luck & share your best score in the comments! ",
"raw": "Find out by playing Fake Insects 🐞 a Game where you need to identify which insects are fake (AI generated). Good luck & share your best score in the comments! ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/victor/fake-insects",
"resource": {
"type": "space",
"id": "victor/fake-insects",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/victor/fake-insects",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | How good are you at spotting AI-generated images?
Find out by playing Fake Insects 🐞, a game where you need to identify which insects are fake (AI-generated). Good luck & share your best score in the comments! 
https://huggingface.co/spaces/victor/fake-insects | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg",
"fullname": "Victor Mustar",
"name": "victor",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 2578,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5f17f0a0925b9863e28ad517/Gn3CqEf83euvbSF1089W5.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"cschroeder",
"Nenux",
"John6666",
"Clausss",
"ipushprajyadav",
"diwank",
"osanseviero",
"DiamanteAmarelo"
],
"count": 8
},
{
"reaction": "❤️",
"users": [
"ijohn07",
"Clausss",
"den0620",
"mmx31",
"diwank",
"samadpls",
"maximuspowers",
"DiamanteAmarelo"
],
"count": 8
},
{
"reaction": "🤯",
"users": [
"danielus",
"osanseviero",
"DiamanteAmarelo"
],
"count": 3
},
{
"reaction": "🧠",
"users": [
"DiamanteAmarelo"
],
"count": 1
},
{
"reaction": "🔥",
"users": [
"Shashenkov"
],
"count": 1
}
] | 2024-08-17T18:25:03.000Z | 2024-08-25T16:20:29.158Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66c128b7d9a7974122e98d3a/HrEA55MaIK-11dConk7v6.jpeg",
"fullname": "Hussain",
"name": "riz926",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/653bccf9c5ba23359b23a76b/Na3jFV7tNdB_DXh-iXk_D.png",
"fullname": "cgus",
"name": "cgus",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 5,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64a2cd7342b2a76a308b3daf/o6SV0ilIA1sov088MaN9j.jpeg",
"fullname": "Maximus Powers",
"name": "maximuspowers",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 9,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg",
"fullname": "Victor Mustar",
"name": "victor",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 2578,
"isFollowing": false
},
{
"avatarUrl": "/avatars/7c3f6cfef992166991444eb6114ace40.svg",
"fullname": "Georgy Shashenkov",
"name": "Shashenkov",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/13e17d1d70464bad0b71f5ee7f955bfb.svg",
"fullname": "Sammy Kernane",
"name": "DiogeneRSA",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/victor/724160481483785 | 4,125 | 6 |
960119704549684 | [
{
"type": "text",
"value": "So turns out I've been spreading a bit of misinformation when it comes to imatrix in llama.cpp",
"raw": "So turns out I've been spreading a bit of misinformation when it comes to imatrix in llama.cpp",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "It starts true; imatrix runs the model against a corpus of text and tracks the activation of weights to determine which are most important",
"raw": "It starts true; imatrix runs the model against a corpus of text and tracks the activation of weights to determine which are most important",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "However what the quantization then does with that information is where I was wrong.",
"raw": "However what the quantization then does with that information is where I was wrong.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "I think I made the accidental connection between imatrix and exllamav2's measuring, where ExLlamaV2 decides how many bits to assign to which weight depending on the goal BPW",
"raw": "I think I made the accidental connection between imatrix and exllamav2's measuring, where ExLlamaV2 decides how many bits to assign to which weight depending on the goal BPW",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Instead, what llama.cpp with imatrix does is it attempts to select a scale for a quantization block that most accurately returns the important weights to their original values, ie minimizing the dequantization error based on the importance of activations",
"raw": "Instead, what llama.cpp with imatrix does is it attempts to select a scale for a quantization block that most accurately returns the important weights to their original values, ie minimizing the dequantization error based on the importance of activations",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "The mildly surprising part is that it actually just does a relatively brute force search, it picks a bunch of scales and tries each and sees which one results in the minimum error for weights deemed important in the group",
"raw": "The mildly surprising part is that it actually just does a relatively brute force search, it picks a bunch of scales and tries each and sees which one results in the minimum error for weights deemed important in the group",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "But yeah, turns out, the quantization scheme is always the same, it's just that the scaling has a bit more logic to it when you use imatrix",
"raw": "But yeah, turns out, the quantization scheme is always the same, it's just that the scaling has a bit more logic to it when you use imatrix",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Huge shoutout to ",
"raw": "Huge shoutout to ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@compilade",
"resource": null,
"url": null,
"href": null,
"user": "compilade",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " for helping me wrap my head around it - feel free to add/correct as well if I've messed something up",
"raw": " for helping me wrap my head around it - feel free to add/correct as well if I've messed something up",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | So turns out I've been spreading a bit of misinformation when it comes to imatrix in llama.cpp
It starts true; imatrix runs the model against a corpus of text and tracks the activation of weights to determine which are most important
However what the quantization then does with that information is where I was wrong.
I think I made the accidental connection between imatrix and exllamav2's measuring, where ExLlamaV2 decides how many bits to assign to which weight depending on the goal BPW
Instead, what llama.cpp with imatrix does is it attempts to select a scale for a quantization block that most accurately returns the important weights to their original values, ie minimizing the dequantization error based on the importance of activations
The mildly surprising part is that it actually just does a relatively brute force search, it picks a bunch of scales and tries each and sees which one results in the minimum error for weights deemed important in the group
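For intuition, here is a rough Python sketch of that brute-force idea. This is not llama.cpp's actual code — the function and variable names (best_scale, importance, n_candidates) are made up for illustration — it just tries a set of candidate scales for one block and keeps the one with the lowest importance-weighted dequantization error:

```python
import numpy as np

def best_scale(weights, importance, n_bits=4, n_candidates=32):
    # weights: one quantization block; importance: imatrix-style activation stats
    qmax = 2 ** (n_bits - 1) - 1            # e.g. 7 for signed 4-bit
    base = np.max(np.abs(weights)) / qmax   # naive max-abs starting scale
    best_err, best_s = float("inf"), base
    for k in range(1, n_candidates + 1):
        s = base * k / n_candidates         # candidate scale
        q = np.clip(np.round(weights / s), -qmax - 1, qmax)
        err = np.sum(importance * (weights - q * s) ** 2)
        if err < best_err:                  # keep the scale with the lowest weighted error
            best_err, best_s = err, s
    return best_s
```

The details differ per quant type in llama.cpp, but the selection criterion is the same idea: squared error against the original weights, weighted by how important the activations say each weight is.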
But yeah, turns out, the quantization scheme is always the same, it's just that the scaling has a bit more logic to it when you use imatrix
Huge shoutout to @compilade for helping me wrap my head around it - feel free to add/correct as well if I've messed something up | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6435718aaaef013d1aec3b8b/XKf-8MA47tjVAM6SCX0MP.jpeg",
"fullname": "Bartowski",
"name": "bartowski",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 2735,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/4Az8a8F60rNOD3L3ThsCe.png",
"fullname": "Compilade",
"name": "compilade",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3
}
] | [
{
"reaction": "❤️",
"users": [
"MaziyarPanahi",
"victor",
"Themoneybuddha",
"John6666",
"bullerwins",
"Rybens",
"Anderson452",
"NOBODYVONNOTHING",
"qnixsynapse",
"Clausss",
"exdysa",
"AtAndDev",
"IAmTheCollector",
"wsuff",
"rreed-pha",
"djuna",
"MarinaraSpaghetti",
"Pomni",
"RakshitAralimatti",
"osanseviero",
"venketh",
"Arakinas",
"wonderboy",
"nicoboss",
"Joseph717171",
"AIGUYCONTENT",
"Handgun1773",
"bradeenl",
"EloyOn"
],
"count": 29
},
{
"reaction": "👍",
"users": [
"Aurelien-Morgan",
"Clausss",
"AtAndDev",
"sroecker",
"iandol",
"rreed-pha",
"ajibawa-2023",
"ijohn07",
"thiera1",
"nbeerbower",
"newsletter",
"venketh",
"mpk63",
"wonderboy",
"clem",
"nicoboss",
"jsfs11",
"Joseph717171",
"waldie",
"fendiprime"
],
"count": 20
},
{
"reaction": "🚀",
"users": [
"rreed-pha",
"wonderboy",
"nicoboss",
"Joseph717171"
],
"count": 4
}
] | 2024-08-17T12:41:03.000Z | 2024-08-24T07:22:10.246Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fd5e18a90b6dc4633f6d292/gZXHW5dd9R86AV9LMZ--y.png",
"fullname": "Maziyar Panahi",
"name": "MaziyarPanahi",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 1496,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/no-auth/_O-ycBVEVTAvhqC9wH4v2.png",
"fullname": "Eric Sims",
"name": "Themoneybuddha",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "/avatars/c6c710651ec2bde1680497afd4649281.svg",
"fullname": "John Freier",
"name": "mpk63",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/623aada982038110d90921f3/nSHSXJz7PYyw9fn8qoAn4.jpeg",
"fullname": "zero",
"name": "wonderboy",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "/avatars/ea4398745974d781ae9dc0e95b12cabe.svg",
"fullname": "Joseph",
"name": "Joseph717171",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 22,
"isFollowing": false
}
] | /posts/bartowski/960119704549684 | 10,008 | 5 |
860006486706088 | [
{
"type": "text",
"value": "I know a secret about knowledge graphs that the world doesn't! There are severe mathematical limitations to geometric fractals. It is classed as an 'unsolvable problem' in the mathematics world. There are currently ~1,000 mathematicians or so in the world that give this problem serious thought. You literally cannot solve it with geometric fractals. This is why I invented P-FAF, it uses calculus based fractals instead. I literally invented the math to make it work. I solved an 'unsolvable equation' to make the math work. You ONLY make the math work the way I did it in the end. I have never released the licensing commercially. Good luck!",
"raw": "I know a secret about knowledge graphs that the world doesn't! There are severe mathematical limitations to geometric fractals. It is classed as an 'unsolvable problem' in the mathematics world. There are currently ~1,000 mathematicians or so in the world that give this problem serious thought. You literally cannot solve it with geometric fractals. This is why I invented P-FAF, it uses calculus based fractals instead. I literally invented the math to make it work. I solved an 'unsolvable equation' to make the math work. You ONLY make the math work the way I did it in the end. I have never released the licensing commercially. Good luck!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | I know a secret about knowledge graphs that the world doesn't! There are severe mathematical limitations to geometric fractals. It is classed as an 'unsolvable problem' in the mathematics world. There are currently ~1,000 mathematicians or so in the world that give this problem serious thought. You literally cannot solve it with geometric fractals. This is why I invented P-FAF, it uses calculus based fractals instead. I literally invented the math to make it work. I solved an 'unsolvable equation' to make the math work. You ONLY make the math work the way I did it in the end. I have never released the licensing commercially. Good luck! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 148,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"ijohn07"
],
"count": 1
}
] | 2024-08-17T09:47:28.000Z | 2024-08-17T09:59:45.121Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 148,
"isFollowing": false
}
] | /posts/TuringsSolutions/860006486706088 | 573 | 1 |
864345037222506 | [
{
"type": "text",
"value": "Supercool Weekend Read🤖",
"raw": "Supercool Weekend Read🤖",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Nvidia researchers achieved SOTA LLM compression metrics using pruning and knowledge distillation techniques.",
"raw": "Nvidia researchers achieved SOTA LLM compression metrics using pruning and knowledge distillation techniques.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Details on Techniques (Simplified):",
"raw": "Details on Techniques (Simplified):",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "They started off with a large pre-trained language model (15B params), then:",
"raw": "They started off with a large pre-trained language model (15B params), then:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "1. Estimated the importance of different parts of the model (neurons, attention heads, layers) using activation-based metrics on a small calibration dataset.",
"raw": "1. Estimated the importance of different parts of the model (neurons, attention heads, layers) using activation-based metrics on a small calibration dataset.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "2. Pruned (remove) less important parts of the model to reduce its size.",
"raw": "2. Pruned (remove) less important parts of the model to reduce its size.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "3. Retrained the pruned model using knowledge distillation, where the original large model acts as a teacher for the smaller pruned model.",
"raw": "3. Retrained the pruned model using knowledge distillation, where the original large model acts as a teacher for the smaller pruned model.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "4. Used a lightweight neural architecture search to find the best configuration for the pruned model.",
"raw": "4. Used a lightweight neural architecture search to find the best configuration for the pruned model.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "5. Repeated this process iteratively to create even smaller models.",
"raw": "5. Repeated this process iteratively to create even smaller models.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Cool, giving it a try this weekend 😎",
"raw": "Cool, giving it a try this weekend 😎",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Code: ",
"raw": "Code: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/NVlabs/Minitron",
"resource": null,
"url": null,
"href": "https://github.com/NVlabs/Minitron",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "link",
"value": null,
"raw": "https://arxiv.org/abs/2407.14679",
"resource": null,
"url": null,
"href": "https://arxiv.org/abs/2407.14679",
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Demo: ",
"raw": "Demo: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/nvidia/minitron",
"resource": {
"type": "space",
"id": "nvidia/minitron",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/nvidia/minitron",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Supercool Weekend Read🤖
Nvidia researchers achieved SOTA LLM compression metrics using pruning and knowledge distillation techniques.
Details on Techniques (Simplified):
They started off with a large pre-trained language model (15B params), then:
1. Estimated the importance of different parts of the model (neurons, attention heads, layers) using activation-based metrics on a small calibration dataset.
2. Pruned (removed) less important parts of the model to reduce its size.
3. Retrained the pruned model using knowledge distillation, where the original large model acts as a teacher for the smaller pruned model.
4. Used a lightweight neural architecture search to find the best configuration for the pruned model.
5. Repeated this process iteratively to create even smaller models.
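As a rough illustration of steps 1–3, here is a hedged PyTorch sketch — this is not NVIDIA's Minitron code, and the helper names (neuron_importance, prune_linear, distill_loss) plus the simple mean-activation importance score are assumptions made for the example:

```python
import torch
import torch.nn.functional as F

def neuron_importance(activations):
    # Step 1: score each hidden neuron by its mean absolute activation
    # over a small calibration set (shape: batch x tokens x hidden).
    return activations.abs().mean(dim=(0, 1))

def prune_linear(layer, importance, keep_ratio=0.5):
    # Step 2: keep only the highest-scoring output neurons of a linear layer.
    k = int(layer.out_features * keep_ratio)
    keep = torch.topk(importance, k).indices
    pruned = torch.nn.Linear(layer.in_features, k, bias=layer.bias is not None)
    pruned.weight.data = layer.weight.data[keep].clone()
    if layer.bias is not None:
        pruned.bias.data = layer.bias.data[keep].clone()
    return pruned

def distill_loss(student_logits, teacher_logits, T=2.0):
    # Step 3: soften both distributions and match the pruned student to the teacher.
    return F.kl_div(
        F.log_softmax(student_logits / T, dim=-1),
        F.softmax(teacher_logits / T, dim=-1),
        reduction="batchmean",
    ) * (T * T)
```

The actual importance metrics, the lightweight architecture search and the iteration schedule are described in the code and paper linked below.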
Cool, giving it a try this weekend 😎
Code: https://github.com/NVlabs/Minitron
Paper: https://arxiv.org/abs/2407.14679
Demo: https://huggingface.co/spaces/nvidia/minitron | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 189,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/6LfH7qSzvq_r9DiRWdS3J.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/SRC0DzfZnkVdyGz_BatTj.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/HGE5t5vBrVQTPGMD2EaLT.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/r84zuFw97xsQYfnqbXjBT.jpeg"
}
] | [] | [
{
"reaction": "👍",
"users": [
"Edouard360",
"ajibawa-2023",
"adamelliotfields",
"davidle7",
"KingNish"
],
"count": 5
},
{
"reaction": "👀",
"users": [
"John6666",
"Dihelson",
"tuanlda78202"
],
"count": 3
},
{
"reaction": "❤️",
"users": [
"Dihelson"
],
"count": 1
}
] | 2024-08-17T03:42:44.000Z | 2024-08-18T03:10:40.798Z | [] | /posts/Jaward/864345037222506 | 1,784 | 0 |
970159160979953 | [
{
"type": "text",
"value": "Added FLUX.1 pro/dev/schnell and AuraFlow v0.2 to ",
"raw": "Added FLUX.1 pro/dev/schnell and AuraFlow v0.2 to ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/fal/imgsys",
"resource": {
"type": "space",
"id": "fal/imgsys",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/fal/imgsys",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " !!! Go play with it and get us some votez",
"raw": " !!! Go play with it and get us some votez",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Added FLUX.1 pro/dev/schnell and AuraFlow v0.2 to https://huggingface.co/spaces/fal/imgsys !!! Go play with it and get us some votez | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6380ebb8471a4550ff255c62/-5tqR0SqLU53cOsXA-4ON.jpeg",
"fullname": "Batuhan",
"name": "isidentical",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 80,
"isFollowing": false
} | [] | [] | [] | 2024-08-17T03:42:22.000Z | 2024-08-17T03:42:35.601Z | [] | /posts/isidentical/970159160979953 | 581 | 0 |
529523271915020 | [
{
"type": "text",
"value": "NEW math-instruct model + dataset!",
"raw": "NEW math-instruct model + dataset!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/ValiantLabs/Llama3.1-8B-Cobalt",
"resource": {
"type": "model",
"id": "ValiantLabs/Llama3.1-8B-Cobalt",
"discussionNum": null
},
"url": "https://huggingface.co/ValiantLabs/Llama3.1-8B-Cobalt",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " is our new math-instruct model.",
"raw": " is our new math-instruct model.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Trained using a synthetic math-instruct dataset generated with Llama 3.1 405b. Find the dataset here: ",
"raw": "Trained using a synthetic math-instruct dataset generated with Llama 3.1 405b. Find the dataset here: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/sequelbox/Polytope",
"resource": {
"type": "dataset",
"id": "sequelbox/Polytope",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/sequelbox/Polytope",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "More to come soon :)",
"raw": "More to come soon :)",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | NEW math-instruct model + dataset!
https://huggingface.co/ValiantLabs/Llama3.1-8B-Cobalt is our new math-instruct model.
Trained using a synthetic math-instruct dataset generated with Llama 3.1 405b. Find the dataset here: https://huggingface.co/datasets/sequelbox/Polytope
More to come soon :) | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63444f2687964b331809eb55/WvZivsvKsM_t0tBtakovK.png",
"fullname": "t.d.a.g.",
"name": "sequelbox",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 50,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"legolasyiu",
"Bruhn",
"drdata"
],
"count": 3
},
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-16T16:38:07.000Z | 2024-08-16T16:38:07.863Z | [] | /posts/sequelbox/529523271915020 | 1,607 | 0 |
972313929139427 | [
{
"type": "text",
"value": "Introducing HelpingAI2-9B, an emotionally intelligent LLM. ",
"raw": "Introducing HelpingAI2-9B, an emotionally intelligent LLM. ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Model Link : ",
"raw": "Model Link : ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/OEvortex/HelpingAI2-9B",
"resource": {
"type": "model",
"id": "OEvortex/HelpingAI2-9B",
"discussionNum": null
},
"url": "https://huggingface.co/OEvortex/HelpingAI2-9B",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Demo Link: ",
"raw": "Demo Link: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Abhaykoul/HelpingAI2",
"resource": {
"type": "space",
"id": "Abhaykoul/HelpingAI2",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Abhaykoul/HelpingAI2",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "This model is part of the innovative HelpingAI series and it stands out for its ability to engage users with emotional understanding.",
"raw": "This model is part of the innovative HelpingAI series and it stands out for its ability to engage users with emotional understanding.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Key Features:",
"raw": "Key Features:",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "-----------------",
"raw": "-----------------",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "* It gets 95.89 score on EQ Bench greather than all top notch LLMs, reflecting advanced emotional recognition.",
"raw": "* It gets 95.89 score on EQ Bench greather than all top notch LLMs, reflecting advanced emotional recognition.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "* It gives responses in empathetic and supportive manner.",
"raw": "* It gives responses in empathetic and supportive manner.",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Must try our demo: ",
"raw": "Must try our demo: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Abhaykoul/HelpingAI2",
"resource": {
"type": "space",
"id": "Abhaykoul/HelpingAI2",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Abhaykoul/HelpingAI2",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Introducing HelpingAI2-9B, an emotionally intelligent LLM.
Model Link : https://huggingface.co/OEvortex/HelpingAI2-9B
Demo Link: https://huggingface.co/spaces/Abhaykoul/HelpingAI2
This model is part of the innovative HelpingAI series and it stands out for its ability to engage users with emotional understanding.
Key Features:
-----------------
* It scores 95.89 on EQ Bench, higher than all top-notch LLMs, reflecting advanced emotional recognition.
* It gives responses in an empathetic and supportive manner.
Must try our demo: https://huggingface.co/spaces/Abhaykoul/HelpingAI2 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64be41c330a1f0f0f0a1e0eb/vvkpXYESXL_LkfrzzfUB-.jpeg",
"fullname": "Abhay Koul",
"name": "Abhaykoul",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 48,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"prithivMLmods",
"Ken0965",
"VictorSanh",
"thebryanalvarado",
"KingNish",
"ha1772007",
"GPT007",
"victor"
],
"count": 8
},
{
"reaction": "❤️",
"users": [
"ijohn07",
"KingNish",
"victor"
],
"count": 3
},
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-08-16T11:38:23.000Z | 2024-08-16T11:38:23.841Z | [] | /posts/Abhaykoul/972313929139427 | 2,710 | 0 |
343314951558641 | [
{
"type": "text",
"value": "Announcing another BIG data drop! This time it's ~275M images from Flickr ",
"raw": "Announcing another BIG data drop! This time it's ~275M images from Flickr ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/bigdata-pw/Flickr",
"resource": {
"type": "dataset",
"id": "bigdata-pw/Flickr",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/bigdata-pw/Flickr",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Data acquisition for this project is still in progress, get ready for an update soon:tm: ",
"raw": "Data acquisition for this project is still in progress, get ready for an update soon:tm: ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "In case you missed them; other BIG data drops include Diffusion1B ",
"raw": "In case you missed them; other BIG data drops include Diffusion1B ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/bigdata-pw/Diffusion1B",
"resource": {
"type": "dataset",
"id": "bigdata-pw/Diffusion1B",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/bigdata-pw/Diffusion1B",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " - ~1.23B images and generation parameters from a variety of diffusion models and if you fancy practicing diffusion model training check out Dataception ",
"raw": " - ~1.23B images and generation parameters from a variety of diffusion models and if you fancy practicing diffusion model training check out Dataception ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/bigdata-pw/Dataception",
"resource": {
"type": "dataset",
"id": "bigdata-pw/Dataception",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/bigdata-pw/Dataception",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " - a dataset of over 5000 datasets in WebDataset format!",
"raw": " - a dataset of over 5000 datasets in WebDataset format!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Requests are always welcome so reach out if there's a dataset you'd like to see!",
"raw": "Requests are always welcome so reach out if there's a dataset you'd like to see!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Announcing another BIG data drop! This time it's ~275M images from Flickr https://huggingface.co/datasets/bigdata-pw/Flickr
Data acquisition for this project is still in progress; get ready for an update soon:tm:
In case you missed them, other BIG data drops include Diffusion1B https://huggingface.co/datasets/bigdata-pw/Diffusion1B - ~1.23B images and generation parameters from a variety of diffusion models. If you fancy practicing diffusion model training, check out Dataception https://huggingface.co/datasets/bigdata-pw/Dataception - a dataset of over 5000 datasets in WebDataset format!
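If you just want to peek at the Flickr drop without downloading ~275M records, a streaming load with the datasets library should work; this is only a sketch, and the split name and field layout are assumptions, so check the dataset card first.
```python
# Sketch: stream a few records from the Flickr drop without a full download.
# The "train" split and the record fields are assumptions -- see the dataset card.
from datasets import load_dataset

ds = load_dataset("bigdata-pw/Flickr", split="train", streaming=True)
for i, row in enumerate(ds):
    print({k: row[k] for k in list(row)[:5]})   # peek at the first few fields
    if i >= 4:
        break
```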
Requests are always welcome so reach out if there's a dataset you'd like to see! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/665074ded3e886a93d713e73/tTAkhsz2J-uEQAfSsRvAf.jpeg",
"fullname": "hlky",
"name": "hlky",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 17,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"ajibawa-2023",
"John6666",
"Mdubbya",
"dashfunnydashdash",
"ZeroWw"
],
"count": 5
}
] | 2024-08-15T18:55:29.000Z | 2024-08-16T09:17:26.971Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64d323b950310d7ad397fce7/KKGL1Y5ZeQSBTqaeKQCaY.png",
"fullname": "Ryan Miller",
"name": "Meroar",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/hlky/343314951558641 | 1,909 | 1 |
576586250383339 | [
{
"type": "text",
"value": "✨ Feeling thankful... ",
"raw": "✨ Feeling thankful... ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🇮🇳 15th August, 2024; on India's 78th Independence Day ",
"raw": "🇮🇳 15th August, 2024; on India's 78th Independence Day ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🎉 Crossed 100 followers on Hugging Face",
"raw": "🎉 Crossed 100 followers on Hugging Face",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🏆 Got LinkedIn Top Voice",
"raw": "🏆 Got LinkedIn Top Voice",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "🤖 AI has never been more exciting and I am here for it",
"raw": "🤖 AI has never been more exciting and I am here for it",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "👀 ",
"raw": "👀 ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "mention",
"value": null,
"raw": "@clem",
"resource": null,
"url": null,
"href": null,
"user": "clem",
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": " Can I be a Hugging Face fellow now? ",
"raw": " Can I be a Hugging Face fellow now? ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | ✨ Feeling thankful...
🇮🇳 15th August, 2024; on India's 78th Independence Day
🎉 Crossed 100 followers on Hugging Face
🏆 Got LinkedIn Top Voice
🤖 AI has never been more exciting and I am here for it
👀 @clem Can I be a Hugging Face fellow now? | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png",
"fullname": "Kuldeep Singh Sidhu",
"name": "singhsidhukuldeep",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 197,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/-10pNrJFQQKsNlXf5_kqs.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/BVppofWzAZ1xB8jYIYhZB.jpeg"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg",
"fullname": "Clem 🤗",
"name": "clem",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1734
}
] | [
{
"reaction": "🚀",
"users": [
"wonderboy",
"DavidAU",
"mmx31",
"KingNish",
"YaTharThShaRma999"
],
"count": 5
}
] | 2024-08-15T18:29:10.000Z | 2024-08-15T18:29:10.232Z | [] | /posts/singhsidhukuldeep/576586250383339 | 1,699 | 0 |
780629919182270 | [
{
"type": "text",
"value": "Improved ControlNet! ",
"raw": "Improved ControlNet! ",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "Now supports dynamic resolution for perfect landscape and portrait outputs. Generate stunning images without distortion—optimized for any aspect ratio!",
"raw": "Now supports dynamic resolution for perfect landscape and portrait outputs. Generate stunning images without distortion—optimized for any aspect ratio!",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "text",
"value": "...",
"raw": "...",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"resource": null,
"url": null,
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/DamarJati/FLUX.1-DEV-Canny",
"resource": {
"type": "space",
"id": "DamarJati/FLUX.1-DEV-Canny",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/DamarJati/FLUX.1-DEV-Canny",
"href": null,
"user": null,
"lang": null,
"code": null,
"label": null
}
] | Improved ControlNet!
Now supports dynamic resolution for perfect landscape and portrait outputs. Generate stunning images without distortion—optimized for any aspect ratio!
...
https://huggingface.co/spaces/DamarJati/FLUX.1-DEV-Canny | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6456f3ca1ca9debab0554f8b/pQuSK-pS3NnJgXewbODvh.png",
"fullname": "Damar Jati 🍫",
"name": "DamarJati",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 113,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6456f3ca1ca9debab0554f8b/0UcKm_7YFwtdCNO1mMSy-.jpeg"
}
] | [] | [
{
"reaction": "👍",
"users": [
"John6666",
"williamsoeherman",
"saikanov"
],
"count": 3
},
{
"reaction": "❤️",
"users": [
"ijohn07",
"williamsoeherman"
],
"count": 2
}
] | 2024-08-15T15:17:57.000Z | 2024-08-15T15:49:32.401Z | [] | /posts/DamarJati/780629919182270 | 3,080 | 0 |