slug
stringlengths
15
15
content
listlengths
1
129
rawContent
stringlengths
1
2k
author
dict
attachments
listlengths
0
49
mentions
listlengths
0
49
reactions
listlengths
0
12
publishedAt
stringlengths
24
24
updatedAt
stringlengths
24
24
commentators
listlengths
0
47
url
stringlengths
25
46
totalUniqueImpressions
int64
1
41.5k
numComments
int64
0
621
927811517468266
[ { "type": "text", "value": "How can I make my RAG application generate real-time responses? Up until now, I have been using Groq for fast LLM generation and the Gradio Live function. I am looking for a better solution that can help me build a real-time application without any delay. ", "raw": "How can I make my RAG application generate real-time responses? Up until now, I have been using Groq for fast LLM generation and the Gradio Live function. I am looking for a better solution that can help me build a real-time application without any delay. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@abidlabs", "resource": null, "url": null, "href": null, "user": "abidlabs", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/kingabzpro/Real-Time-RAG", "resource": { "type": "space", "id": "kingabzpro/Real-Time-RAG", "discussionNum": null }, "url": "https://huggingface.co/spaces/kingabzpro/Real-Time-RAG", "href": null, "user": null, "lang": null, "code": null, "label": null } ]
How can I make my RAG application generate real-time responses? Up until now, I have been using Groq for fast LLM generation and the Gradio Live function. I am looking for a better solution that can help me build a real-time application without any delay. @abidlabs https://huggingface.co/spaces/kingabzpro/Real-Time-RAG
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/603945d6db430f160dced222/Rf3ChIRWR8eBi7sEVgl4s.png", "fullname": "Abid Ali Awan", "name": "kingabzpro", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 28, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/603945d6db430f160dced222/_zAZbK81qxbj7bIufhCqm.gif" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1621947938344-noauth.png", "fullname": "Abubakar Abid", "name": "abidlabs", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 482 } ]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "bitdeep", "pryanshusharma", "Jawaher786", "AtAndDev", "DataSoul", "privategeek24", "kingabzpro" ], "count": 8 } ]
2024-09-07T08:57:26.000Z
2024-09-09T15:34:38.343Z
[ { "avatarUrl": "/avatars/cec7d06fd895a347b742baea8a90d224.svg", "fullname": "Donald", "name": "SVHawk13", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/603945d6db430f160dced222/Rf3ChIRWR8eBi7sEVgl4s.png", "fullname": "Abid Ali Awan", "name": "kingabzpro", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 28, "isFollowing": false } ]
/posts/kingabzpro/927811517468266
1,838
2
456542013174124
[ { "type": "text", "value": "Google's Chain-of-Thought (CoT) is one of the most effective ways to improve LLMs' reasoning.", "raw": "Google's Chain-of-Thought (CoT) is one of the most effective ways to improve LLMs' reasoning.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Researchers have now developed a novel approach called Strategic Chain-of-Thought (SCoT) to enhance the reasoning capabilities of large language models even further.", "raw": "Researchers have now developed a novel approach called Strategic Chain-of-Thought (SCoT) to enhance the reasoning capabilities of large language models even further.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿง  SCoT uses a two-stage process within a single prompt:", "raw": "๐Ÿง  SCoT uses a two-stage process within a single prompt:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Strategy Elicitation: The model first identifies and determines an effective problem-solving strategy for the given task. This becomes the strategic knowledge that guides the reasoning process.", "raw": "- Strategy Elicitation: The model first identifies and determines an effective problem-solving strategy for the given task. 
This becomes the strategic knowledge that guides the reasoning process.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Strategy Application: The model then applies the identified strategic knowledge to solve the problem and generate the final answer.", "raw": "- Strategy Application: The model then applies the identified strategic knowledge to solve the problem and generate the final answer.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Essentially, SCoT integrates strategic knowledge to guide reasoning without relying on external knowledge sources or multiple queries.", "raw": "Essentially, SCoT integrates strategic knowledge to guide reasoning without relying on external knowledge sources or multiple queries.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "According to the research, SCoT showed significant improvements over standard CoT across various datasets, including a 21.05% increase on the GSM8K math dataset and a 24.13% increase on the Tracking_Objects spatial reasoning task.", "raw": "According to the research, SCoT showed significant improvements over standard CoT across various datasets, including a 21.05% increase on the GSM8K math dataset and a 24.13% increase on the Tracking_Objects spatial reasoning task.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Changes in the Prompt Structure:", "raw": "Changes in the Prompt Structure:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "The SCoT prompt typically consists of five components:", "raw": "The SCoT prompt typically consists of five components:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Role: Defines the expert role the model should assume.", "raw": 
"- Role: Defines the expert role the model should assume.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Workflow: Outlines the steps for strategy identification and application.", "raw": "- Workflow: Outlines the steps for strategy identification and application.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Rules: Specifies guidelines for generating answers.", "raw": "- Rules: Specifies guidelines for generating answers.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Initialization: Sets up the task.", "raw": "- Initialization: Sets up the task.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Task Input: Provides the specific problem to solve.", "raw": "- Task Input: Provides the specific problem to solve.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Strategy Generation:", "raw": "Strategy Generation:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "The model is prompted to generate strategic knowledge relevant to the problem domain. For example, in mathematics, it might favor elegant solutions like using arithmetic series formulas over brute-force calculations.", "raw": "The model is prompted to generate strategic knowledge relevant to the problem domain. 
For example, in mathematics, it might favor elegant solutions like using arithmetic series formulas over brute-force calculations.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Guided Reasoning:", "raw": "Guided Reasoning:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Using the elicited strategy, the model then generates a chain-of-thought reasoning path. This approach aims to produce more stable and higher-quality outputs compared to standard chain-of-thought methods.", "raw": "Using the elicited strategy, the model then generates a chain-of-thought reasoning path. This approach aims to produce more stable and higher-quality outputs compared to standard chain-of-thought methods.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Read the full paper: ", "raw": "Read the full paper: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2409.03271", "resource": null, "url": null, "href": "https://arxiv.org/abs/2409.03271", "user": null, "lang": null, "code": null, "label": null } ]
Google's Chain-of-Thought (CoT) is one of the most effective ways to improve LLMs' reasoning. Researchers have now developed a novel approach called Strategic Chain-of-Thought (SCoT) to enhance the reasoning capabilities of large language models even further. ๐Ÿง  SCoT uses a two-stage process within a single prompt: - Strategy Elicitation: The model first identifies and determines an effective problem-solving strategy for the given task. This becomes the strategic knowledge that guides the reasoning process. - Strategy Application: The model then applies the identified strategic knowledge to solve the problem and generate the final answer. Essentially, SCoT integrates strategic knowledge to guide reasoning without relying on external knowledge sources or multiple queries. According to the research, SCoT showed significant improvements over standard CoT across various datasets, including a 21.05% increase on the GSM8K math dataset and a 24.13% increase on the Tracking_Objects spatial reasoning task. Changes in the Prompt Structure: The SCoT prompt typically consists of five components: - Role: Defines the expert role the model should assume. - Workflow: Outlines the steps for strategy identification and application. - Rules: Specifies guidelines for generating answers. - Initialization: Sets up the task. - Task Input: Provides the specific problem to solve. Strategy Generation: The model is prompted to generate strategic knowledge relevant to the problem domain. For example, in mathematics, it might favor elegant solutions like using arithmetic series formulas over brute-force calculations. Guided Reasoning: Using the elicited strategy, the model then generates a chain-of-thought reasoning path. This approach aims to produce more stable and higher-quality outputs compared to standard chain-of-thought methods. Read the full paper: https://arxiv.org/abs/2409.03271
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 197, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/-sAeV04eQKJUXbJXGLrJ0.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "bitdeep", "den0620", "Jawaher786", "createtheimaginable", "ChuGyouk", "Norod78" ], "count": 7 }, { "reaction": "๐Ÿ”ฅ", "users": [ "createtheimaginable", "DIvAndrey" ], "count": 2 } ]
2024-09-07T08:05:35.000Z
2024-09-08T17:53:33.344Z
[ { "avatarUrl": "/avatars/1f7026c98fa415c088c65ec8a65c9b60.svg", "fullname": "Adrian Murat Ozdemir", "name": "muratowski", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/singhsidhukuldeep/456542013174124
1,850
1
326367703139287
[ { "type": "text", "value": "i just made the best 0.5b model to date (again)", "raw": "i just made the best 0.5b model to date (again)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "its name is arco and is ready to fight any 0.5b model at arc challenge", "raw": "its name is arco and is ready to fight any 0.5b model at arc challenge", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/appvoid/arco", "resource": { "type": "model", "id": "appvoid/arco", "discussionNum": null }, "url": "https://huggingface.co/appvoid/arco", "href": null, "user": null, "lang": null, "code": null, "label": null } ]
i just made the best 0.5b model to date (again) its name is arco and is ready to fight any 0.5b model at arc challenge https://huggingface.co/appvoid/arco
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a813dedbb9e28866a91b27/zs-RWFuXs17IfPUhxQaei.jpeg", "fullname": "appvoid", "name": "appvoid", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 35, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62a813dedbb9e28866a91b27/7QIK7iyY-wXpprlHwqbqv.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Tonic", "AtAndDev", "louisbrulenaudet" ], "count": 4 }, { "reaction": "๐Ÿ”ฅ", "users": [ "nicolollo", "AtAndDev", "TobDeBer", "cnmoro" ], "count": 4 } ]
2024-09-06T23:59:50.000Z
2024-09-09T11:26:03.415Z
[]
/posts/appvoid/326367703139287
1,281
6
460338189482262
[ { "type": "text", "value": "Reposting from twitter:", "raw": "Reposting from twitter:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Just so you all know, I'll be on vacation for the following two weeks and away from home! I'm hoping to get on at least once a day to load up some quants, but I won't be as bleeding edge and on the ball :) feel free to shoot me a message if you see one I should make!", "raw": "Just so you all know, I'll be on vacation for the following two weeks and away from home! I'm hoping to get on at least once a day to load up some quants, but I won't be as bleeding edge and on the ball :) feel free to shoot me a message if you see one I should make!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "In the meantime if you need something bleeding edge make sure to check out ", "raw": "In the meantime if you need something bleeding edge make sure to check out ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@MaziyarPanahi", "resource": null, "url": null, "href": null, "user": "MaziyarPanahi", "lang": null, "code": null, "label": null }, { "type": "text", "value": " or ", "raw": " or ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@bullerwins", "resource": null, "url": null, "href": null, "user": "bullerwins", "lang": null, "code": null, "label": null }, { "type": "text", "value": " who both put out great work!", "raw": " who both put out great work!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Reposting from twitter: Just so you all know, I'll be on vacation for the following two weeks and away from home! I'm hoping to get on at least once a day to load up some quants, but I won't be as bleeding edge and on the ball :) feel free to shoot me a message if you see one I should make! In the meantime if you need something bleeding edge make sure to check out @MaziyarPanahi or @bullerwins who both put out great work!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6435718aaaef013d1aec3b8b/XKf-8MA47tjVAM6SCX0MP.jpeg", "fullname": "Bartowski", "name": "bartowski", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2735, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65cccccefb8ab7fcc2c6424c/0dlk5hmzNhTWr8j9E1DXP.jpeg", "fullname": "Rodri Mora", "name": "bullerwins", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 51 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fd5e18a90b6dc4633f6d292/gZXHW5dd9R86AV9LMZ--y.png", "fullname": "Maziyar Panahi", "name": "MaziyarPanahi", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 1496 } ]
[ { "reaction": "โค๏ธ", "users": [ "Joseph717171", "syrupsweety", "not-lain", "bullerwins", "MaziyarPanahi", "MarinaraSpaghetti", "osanseviero", "Whiteshadow12", "victor", "JoeySalmons", "celsowm", "hudzax", "win10" ], "count": 13 }, { "reaction": "๐Ÿ˜Ž", "users": [ "Joseph717171", "John6666", "AIGUYCONTENT", "MaziyarPanahi", "osanseviero", "neoopus" ], "count": 6 }, { "reaction": "๐Ÿ”ฅ", "users": [ "Joseph717171", "MaziyarPanahi" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "Kynesyn" ], "count": 1 } ]
2024-09-06T23:29:41.000Z
2024-10-16T21:11:46.753Z
[ { "avatarUrl": "/avatars/ea4398745974d781ae9dc0e95b12cabe.svg", "fullname": "Joseph", "name": "Joseph717171", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 22, "isFollowing": false }, { "avatarUrl": "/avatars/99a24b1d41e468fed0eca43545090284.svg", "fullname": "Walter Lima", "name": "waltervix", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "/avatars/ae2b8b99b8c9d2b8a2db454806e1f5d9.svg", "fullname": "Tim Kyn", "name": "Kynesyn", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/df614f21f59bc6e4d1f934169e4aec99.svg", "fullname": "Andre ", "name": "Gigahardglob", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/bartowski/460338189482262
28,993
4
445706346542195
[ { "type": "text", "value": "FLUX Prompt Generator Updates", "raw": "FLUX Prompt Generator Updates", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- ", "raw": "- ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/gokaygokay/FLUX-Prompt-Generator", "resource": { "type": "space", "id": "gokaygokay/FLUX-Prompt-Generator", "discussionNum": null }, "url": "https://huggingface.co/spaces/gokaygokay/FLUX-Prompt-Generator", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- There are now hundreds of new selections across diverse categories, each offering a lot of choices:", "raw": "- There are now hundreds of new selections across diverse categories, each offering a lot of choices:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Architecture, Art, Artist, Brands, Character, Cinematic, Fashion, Feelings, Geography, Human, Interaction, Keywords, Objects, People, Photography, Plots, Poses, Scene, Science, Stuff, Time, Typography, Vehicle, Video Game", "raw": "Architecture, Art, Artist, Brands, Character, Cinematic, Fashion, Feelings, Geography, Human, Interaction, Keywords, Objects, People, Photography, Plots, Poses, Scene, Science, Stuff, Time, Typography, Vehicle, Video Game", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- In addition to Hugging Face, I've integrated new LLM providers: Groq, OpenAI, and Claude.", "raw": "- In addition to Hugging Face, I've integrated new LLM providers: Groq, OpenAI, and Claude.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Upgraded 
Vision Language Models (VLMs): We now feature Qwen2-VL, JoyCaption and Florence-2-large.", "raw": "- Upgraded Vision Language Models (VLMs): We now feature Qwen2-VL, JoyCaption and Florence-2-large.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- New specialized system prompts for various styles and themes, including Happy, Simple, Poster, Only Objects, No Figure, Landscape, Fantasy.", "raw": "- New specialized system prompts for various styles and themes, including Happy, Simple, Poster, Only Objects, No Figure, Landscape, Fantasy.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
FLUX Prompt Generator Updates - https://huggingface.co/spaces/gokaygokay/FLUX-Prompt-Generator - There are now hundreds of new selections across diverse categories, each offering a lot of choices: Architecture, Art, Artist, Brands, Character, Cinematic, Fashion, Feelings, Geography, Human, Interaction, Keywords, Objects, People, Photography, Plots, Poses, Scene, Science, Stuff, Time, Typography, Vehicle, Video Game - In addition to Hugging Face, I've integrated new LLM providers: Groq, OpenAI, and Claude. - Upgraded Vision Language Models (VLMs): We now feature Qwen2-VL, JoyCaption and Florence-2-large. - New specialized system prompts for various styles and themes, including Happy, Simple, Poster, Only Objects, No Figure, Landscape, Fantasy.
{ "avatarUrl": "/avatars/b9a6d8e11ec7a62ca2b819e0b6c37222.svg", "fullname": "gokay aydogan", "name": "gokaygokay", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 1100, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/630899601dd1e3075d975785/u_IZ43q0247UaH2_LK07W.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/630899601dd1e3075d975785/6MVx_ctCbmMXRdF2Dfmx6.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/630899601dd1e3075d975785/8V-yOsc-8v9MDOIDEo0IA.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/630899601dd1e3075d975785/1XKyGghgMJ2y3y2s_SRT1.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/630899601dd1e3075d975785/vdKrZg5_vWetRUnU0iQEg.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/630899601dd1e3075d975785/lqNCplC-A4mIXZlMIFP8A.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/630899601dd1e3075d975785/mndIHcOBYswRlUv4gUCtg.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/630899601dd1e3075d975785/Tl-jreh1SGZeCJf6Csb46.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "John6666", "YaTharThShaRma999", "KingNish", "ucsahin", "Chief-Inspector", "EmilyChan", "victor", "Felladrin", "Nyxie7", "gokaygokay" ], "count": 10 }, { "reaction": "๐Ÿค—", "users": [ "zohebk" ], "count": 1 } ]
2024-09-06T22:10:36.000Z
2024-10-16T22:23:51.435Z
[ { "avatarUrl": "/avatars/e61f8d637223b476bcafe96945b552e1.svg", "fullname": "hashed albaham", "name": "Hashed000", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/gokaygokay/445706346542195
6,096
1
861996108790591
[ { "type": "text", "value": "Yesterday ย ", "raw": "Yesterday ย ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@mattshumer", "resource": null, "url": null, "href": null, "user": "mattshumer", "lang": null, "code": null, "label": null }, { "type": "text", "value": " released ", "raw": " released ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/mattshumer/Reflection-Llama-3.1-70B", "resource": { "type": "model", "id": "mattshumer/Reflection-Llama-3.1-70B", "discussionNum": null }, "url": "https://huggingface.co/mattshumer/Reflection-Llama-3.1-70B", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": ", an impressive model that achieved incredible results in benchmarks like MMLU. The model was fine-tuned using Reflection-Tuning and the dataset used wasn't released, but I created a small recipe with distilabel that allows generating a dataset with a similar output format:", "raw": ", an impressive model that achieved incredible results in benchmarks like MMLU. The model was fine-tuned using Reflection-Tuning and the dataset used wasn't released, but I created a small recipe with distilabel that allows generating a dataset with a similar output format:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "1. We use MagPie ๐Ÿฆ in combination with ", "raw": "1. We use MagPie ๐Ÿฆ in combination with ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/meta-llama/Meta-Llama-3.1-70B-Instruct", "resource": null, "url": null, "href": "https://huggingface.co/meta-llama/Meta-Llama-3.1-70B-Instruct", "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " to generate reasoning instructions.", "raw": " to generate reasoning instructions.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "2. We generate a response again using ", "raw": "2. We generate a response again using ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/meta-llama/Meta-Llama-3.1-70B-Instruct", "resource": null, "url": null, "href": "https://huggingface.co/meta-llama/Meta-Llama-3.1-70B-Instruct", "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": ", but we steer the LLM to generate an specific output format using a custom system prompt. In the system prompt, we instruct the LLM that it will have first to think ๐Ÿ’ญ and have reflections that will help resolving ambiguities. 
After that, we instruct the LLM to generate an output based on the previous thinking ", "raw": ", but we steer the LLM to generate an specific output format using a custom system prompt. In the system prompt, we instruct the LLM that it will have first to think ๐Ÿ’ญ and have reflections that will help resolving ambiguities. After that, we instruct the LLM to generate an output based on the previous thinking ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "In this dataset ", "raw": "In this dataset ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/gabrielmbmb/distilabel-reflection-tuning", "resource": { "type": "dataset", "id": "gabrielmbmb/distilabel-reflection-tuning", "discussionNum": null }, "url": "https://huggingface.co/datasets/gabrielmbmb/distilabel-reflection-tuning", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " you can found 5 rows that I generated with this recipe. You can also found the code of the pipeline in the file called ", "raw": " you can found 5 rows that I generated with this recipe. You can also found the code of the pipeline in the file called ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`reflection.py`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "reflection.py", "label": null }, { "type": "text", "value": ".", "raw": ".", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Yesterday ย @mattshumer released https://huggingface.co/mattshumer/Reflection-Llama-3.1-70B, an impressive model that achieved incredible results in benchmarks like MMLU. The model was fine-tuned using Reflection-Tuning and the dataset used wasn't released, but I created a small recipe with distilabel that allows generating a dataset with a similar output format: 1. We use MagPie ๐Ÿฆ in combination with https://huggingface.co/meta-llama/Meta-Llama-3.1-70B-Instruct to generate reasoning instructions. 2. We generate a response again using https://huggingface.co/meta-llama/Meta-Llama-3.1-70B-Instruct, but we steer the LLM to generate an specific output format using a custom system prompt. In the system prompt, we instruct the LLM that it will have first to think ๐Ÿ’ญ and have reflections that will help resolving ambiguities. After that, we instruct the LLM to generate an output based on the previous thinking In this dataset https://huggingface.co/datasets/gabrielmbmb/distilabel-reflection-tuning you can found 5 rows that I generated with this recipe. You can also found the code of the pipeline in the file called `reflection.py`.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60f2fc91b92afccb7c34b8ed/whF6nGtyTAhbtiWJJnL9e.png", "fullname": "Gabriel Martรญn Blรกzquez", "name": "gabrielmbmb", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 87, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f2fc91b92afccb7c34b8ed/Uz2Yc6O5J-PL7JZsin3cs.png" } ]
[ { "avatarUrl": "/avatars/821175d73c2ae3ceb28d445963c95722.svg", "fullname": "Matt Shumer", "name": "mattshumer", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 344 } ]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "clem", "prithivMLmods", "KonradSzafer", "Svngoku", "John6666", "den0620", "osanseviero", "gabrielmbmb", "louisbrulenaudet" ], "count": 9 }, { "reaction": "โค๏ธ", "users": [ "clem", "osanseviero" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "dashfunnydashdash" ], "count": 1 } ]
2024-09-06T16:42:53.000Z
2024-09-06T16:42:53.578Z
[]
/posts/gabrielmbmb/861996108790591
1,781
0
113923089053942
[ { "type": "text", "value": "4 million chess puzzles", "raw": "4 million chess puzzles", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
4 million chess puzzles
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1646492542174-5e70f6048ce3c604d78fe133.jpeg", "fullname": "Christopher Akiki", "name": "christopher", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 66, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5e70f6048ce3c604d78fe133/BA0YvU282s9WEY5zEeMMp.png" } ]
[]
[ { "reaction": "โค๏ธ", "users": [ "hemanuelly", "hunken", "Sri-Vigneshwar-DJ", "Tonioesparza", "den0620", "Akash3104" ], "count": 6 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "hemanuelly", "Akash3104" ], "count": 3 } ]
2024-09-06T14:05:58.000Z
2024-09-06T14:05:58.827Z
[]
/posts/christopher/113923089053942
1,262
0
131870164983456
[ { "type": "text", "value": "\"LLM inference at scale with TGI\". Cool blogpost: ", "raw": "\"LLM inference at scale with TGI\". Cool blogpost: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://www.adyen.com/knowledge-hub/llm-inference-at-scale-with-tgi", "resource": null, "url": null, "href": "https://www.adyen.com/knowledge-hub/llm-inference-at-scale-with-tgi", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Well done ", "raw": "Well done ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@martinigoyanes", "resource": null, "url": null, "href": null, "user": "martinigoyanes", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@rafa-hernandez", "resource": null, "url": null, "href": null, "user": "rafa-hernandez", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@Vidusharma", "resource": null, "url": null, "href": null, "user": "Vidusharma", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@frisokingma", "resource": null, "url": null, "href": null, "user": "frisokingma", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@hannahwright", "resource": null, "url": null, "href": null, "user": "hannahwright", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@jeanmarcs", "resource": null, "url": null, "href": null, "user": "jeanmarcs", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@antonioramos", "resource": null, "url": null, "href": null, "user": "antonioramos", "lang": null, "code": null, "label": null }, { "type": "text", "value": " & the whole ", "raw": " & the whole ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/adyen", "resource": null, "url": null, "href": 
"https://huggingface.co/adyen", "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " team. Could be useful to cross-post here: ", "raw": " team. Could be useful to cross-post here: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/community", "resource": null, "url": null, "href": "https://huggingface.co/blog/community", "user": null, "lang": null, "code": null, "label": null } ]
"LLM inference at scale with TGI". Cool blogpost: https://www.adyen.com/knowledge-hub/llm-inference-at-scale-with-tgi Well done @martinigoyanes @rafa-hernandez @Vidusharma @frisokingma @hannahwright @jeanmarcs @antonioramos & the whole https://huggingface.co/adyen team. Could be useful to cross-post here: https://huggingface.co/blog/community
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem ๐Ÿค—", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1734, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5e67bdd61009063689407479/85OdIpyc0cSmcqBLEhaJN.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/650af18e6554462d261e17d3/xesBAU_i3KI3nZMe58Vxe.jpeg", "fullname": "Antonio Ramos", "name": "antonioramos", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 }, { "avatarUrl": "/avatars/a783c959e600b04bf2de8037d074ec70.svg", "fullname": "Friso Kingma", "name": "frisokingma", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2 }, { "avatarUrl": "/avatars/75be3faf1def47be6b3f526752de8206.svg", "fullname": "Hannah Wright", "name": "hannahwright", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2 }, { "avatarUrl": "/avatars/10be1afd9299f52d4d08b952c0c22e5b.svg", "fullname": "Jean-Marc Saad", "name": "jeanmarcs", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65de001d6a6643b02251fd2a/8YaiGgRzkOG6WAsY-ny-t.jpeg", "fullname": "Martin Iglesias Goyanes", "name": "martinigoyanes", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3 }, { "avatarUrl": "/avatars/f8ab4c515e720b8601d83b80376d66df.svg", "fullname": "Rafael Hernandez Murcia", "name": "rafa-hernandez", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65ca09a097971388a5371284/xBvVTfFOE5n46phf0EBx6.png", "fullname": "Viddy", "name": "Vidusharma", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2 } ]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "vilarin", "KingNish", "victor", "prithivMLmods", "nbroad" ], "count": 5 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "nbroad", "den0620" ], "count": 3 } ]
2024-09-06T13:59:33.000Z
2024-09-06T15:51:14.656Z
[ { "avatarUrl": "/avatars/f8ab4c515e720b8601d83b80376d66df.svg", "fullname": "Rafael Hernandez Murcia", "name": "rafa-hernandez", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65de001d6a6643b02251fd2a/8YaiGgRzkOG6WAsY-ny-t.jpeg", "fullname": "Martin Iglesias Goyanes", "name": "martinigoyanes", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/clem/131870164983456
1,754
2
696626368581978
[ { "type": "text", "value": "๐Ÿ’พ๐Ÿง How much VRAM will you need for training your AI model? ๐Ÿ’พ๐Ÿง ", "raw": "๐Ÿ’พ๐Ÿง How much VRAM will you need for training your AI model? ๐Ÿ’พ๐Ÿง ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Check out this app where you convert:", "raw": "Check out this app where you convert:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Pytorch/tensorflow summary -> required VRAM", "raw": "Pytorch/tensorflow summary -> required VRAM", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "or", "raw": "or", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Parameter count -> required VRAM", "raw": "Parameter count -> required VRAM", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Use it in: ", "raw": "Use it in: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "http://howmuchvram.com", "resource": null, "url": null, "href": "http://howmuchvram.com", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "And everything is open source! Ask for new functionalities or contribute in:", "raw": "And everything is open source! 
Ask for new functionalities or contribute in:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/AlexBodner/How_Much_VRAM", "resource": null, "url": null, "href": "https://github.com/AlexBodner/How_Much_VRAM", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "If it's useful to you leave a star ๐ŸŒŸand share it to someone that will find the tool useful!", "raw": "If it's useful to you leave a star ๐ŸŒŸand share it to someone that will find the tool useful!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "More discussion in: ", "raw": "More discussion in: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://x.com/AlexBodner_/status/1832054850294812679", "resource": null, "url": null, "href": "https://x.com/AlexBodner_/status/1832054850294812679", "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿ’พ๐Ÿง How much VRAM will you need for training your AI model? ๐Ÿ’พ๐Ÿง  Check out this app where you convert: Pytorch/tensorflow summary -> required VRAM or Parameter count -> required VRAM Use it in: http://howmuchvram.com And everything is open source! Ask for new functionalities or contribute in: https://github.com/AlexBodner/How_Much_VRAM If it's useful to you leave a star ๐ŸŒŸand share it to someone that will find the tool useful! More discussion in: https://x.com/AlexBodner_/status/1832054850294812679
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/658880d499ed106ac888dd7a/wMv9-ZsJUw4QQnld_cci7.jpeg", "fullname": "Alexander Dylan Bodner", "name": "AlexBodner", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 28, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/658880d499ed106ac888dd7a/3pE5_tB4Q4LBtj8AZklJQ.mp4" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-06T13:55:12.000Z
2024-09-06T13:55:12.397Z
[]
/posts/AlexBodner/696626368581978
360
0
607838594248861
[ { "type": "text", "value": " I've been working on a Space to make it super easy to create notebooks and help users quickly understand and manipulate their data!", "raw": " I've been working on a Space to make it super easy to create notebooks and help users quickly understand and manipulate their data!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "With just a few clicks automatically generate notebooks for:", "raw": "With just a few clicks automatically generate notebooks for:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ“Š Exploratory Data Analysis", "raw": "๐Ÿ“Š Exploratory Data Analysis", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿง  Text Embeddings", "raw": "๐Ÿง  Text Embeddings", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿค– Retrieval-Augmented Generation (RAG) ", "raw": "๐Ÿค– Retrieval-Augmented Generation (RAG) ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โœจ Automatic training is coming soon!", "raw": "โœจ Automatic training is coming soon!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Check it out here ", "raw": "Check it out here ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/asoria/auto-notebook-creator", "resource": { "type": "space", "id": "asoria/auto-notebook-creator", "discussionNum": null }, "url": "https://huggingface.co/spaces/asoria/auto-notebook-creator", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Appreciate any feedback to improve this tool ๐Ÿค—", "raw": "Appreciate any feedback to improve 
this tool ๐Ÿค—", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
I've been working on a Space to make it super easy to create notebooks and help users quickly understand and manipulate their data! With just a few clicks automatically generate notebooks for: ๐Ÿ“Š Exploratory Data Analysis ๐Ÿง  Text Embeddings ๐Ÿค– Retrieval-Augmented Generation (RAG) โœจ Automatic training is coming soon! Check it out here https://huggingface.co/spaces/asoria/auto-notebook-creator Appreciate any feedback to improve this tool ๐Ÿค—
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674055965173-noauth.jpeg", "fullname": "Andrea Soria", "name": "asoria", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 59, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Saugatkafley", "jmamedov", "AtAndDev" ], "count": 4 }, { "reaction": "๐Ÿคฏ", "users": [ "davanstrien" ], "count": 1 } ]
2024-09-06T13:28:59.000Z
2024-09-06T13:28:59.576Z
[]
/posts/asoria/607838594248861
816
0
850395082965136
[ { "type": "text", "value": "๐ŸŒŸ Argilla v2.1.0 goes multi-modal: Image Field, Dark Mode, Enhanched Hugging Face Hub imports and more!", "raw": "๐ŸŒŸ Argilla v2.1.0 goes multi-modal: Image Field, Dark Mode, Enhanched Hugging Face Hub imports and more!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ–ผ Image Field: Seamlessly work with multimodal datasets", "raw": "๐Ÿ–ผ Image Field: Seamlessly work with multimodal datasets", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸŒ“ Dark Mode: Reduce eye strain with our sleek new look", "raw": "๐ŸŒ“ Dark Mode: Reduce eye strain with our sleek new look", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿค— Enhanced Hugging Face Hub import with the SDK", "raw": "๐Ÿค— Enhanced Hugging Face Hub import with the SDK", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ‡ช๐Ÿ‡ธ Spanish UI: Breaking language barriers", "raw": "๐Ÿ‡ช๐Ÿ‡ธ Spanish UI: Breaking language barriers", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Plus more improvements to supercharge your model curation workflow!", "raw": "Plus more improvements to supercharge your model curation workflow!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Check out the full announcement for details and code examples: ", "raw": "Check out the full announcement for details and code examples: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/argilla-io/argilla/compare/v2.0.1...v2.1.0", "resource": null, "url": null, "href": "https://github.com/argilla-io/argilla/compare/v2.0.1...v2.1.0", "user": null, "lang": null, "code": 
null, "label": null } ]
๐ŸŒŸ Argilla v2.1.0 goes multi-modal: Image Field, Dark Mode, Enhanched Hugging Face Hub imports and more! ๐Ÿ–ผ Image Field: Seamlessly work with multimodal datasets ๐ŸŒ“ Dark Mode: Reduce eye strain with our sleek new look ๐Ÿค— Enhanced Hugging Face Hub import with the SDK ๐Ÿ‡ช๐Ÿ‡ธ Spanish UI: Breaking language barriers Plus more improvements to supercharge your model curation workflow! Check out the full announcement for details and code examples: https://github.com/argilla-io/argilla/compare/v2.0.1...v2.1.0
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 148, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ˜Ž", "users": [ "davanstrien", "gabrielmbmb", "dvilasuero", "Ameeeee", "louisbrulenaudet", "clem", "John6666", "KingNish", "AtAndDev" ], "count": 9 }, { "reaction": "๐Ÿš€", "users": [ "gabrielmbmb", "dvilasuero", "Ameeeee", "clem", "AtAndDev" ], "count": 5 }, { "reaction": "๐Ÿ”ฅ", "users": [ "Ameeeee", "clem", "KingNish", "AtAndDev", "gabrielmbmb" ], "count": 5 } ]
2024-09-06T12:21:30.000Z
2024-09-06T12:21:30.539Z
[]
/posts/davidberenstein1957/850395082965136
1,818
0
206518965814889
[ { "type": "text", "value": "Wanted to train a FLUX model using out-of-copyright images, so I curated concept art images from NASA. ", "raw": "Wanted to train a FLUX model using out-of-copyright images, so I curated concept art images from NASA. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Model: ", "raw": "Model: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/davanstrien/nasa_concept_art", "resource": null, "url": null, "href": "https://huggingface.co/davanstrien/nasa_concept_art", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Dataset: ", "raw": "Dataset: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/davanstrien/nasa_concept_art", "resource": { "type": "dataset", "id": "davanstrien/nasa_concept_art", "discussionNum": null }, "url": "https://huggingface.co/datasets/davanstrien/nasa_concept_art", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "So far, training was done without captions, but I'm experimenting with using VLLMs to generate captions to see if that improves the model.", "raw": "So far, training was done without captions, but I'm experimenting with using VLLMs to generate captions to see if that improves the model.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Wanted to train a FLUX model using out-of-copyright images, so I curated concept art images from NASA. Model: https://huggingface.co/davanstrien/nasa_concept_art Dataset: https://huggingface.co/datasets/davanstrien/nasa_concept_art So far, training was done without captions, but I'm experimenting with using VLLMs to generate captions to see if that improves the model.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg", "fullname": "Daniel van Strien", "name": "davanstrien", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 404, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 }, { "reaction": "โค๏ธ", "users": [ "louisbrulenaudet" ], "count": 1 } ]
2024-09-06T11:41:40.000Z
2024-09-06T11:41:40.962Z
[]
/posts/davanstrien/206518965814889
435
0
374226305257230
[ { "type": "text", "value": "๐Ÿคฏ ๐—” ๐—ป๐—ฒ๐˜„ ๐Ÿณ๐Ÿฌ๐—• ๐—ผ๐—ฝ๐—ฒ๐—ป-๐˜„๐—ฒ๐—ถ๐—ด๐—ต๐˜๐˜€ ๐—Ÿ๐—Ÿ๐—  ๐—ฏ๐—ฒ๐—ฎ๐˜๐˜€ ๐—–๐—น๐—ฎ๐˜‚๐—ฑ๐—ฒ-๐Ÿฏ.๐Ÿฑ-๐—ฆ๐—ผ๐—ป๐—ป๐—ฒ๐˜ ๐—ฎ๐—ป๐—ฑ ๐—š๐—ฃ๐—ง-๐Ÿฐ๐—ผ!", "raw": "๐Ÿคฏ ๐—” ๐—ป๐—ฒ๐˜„ ๐Ÿณ๐Ÿฌ๐—• ๐—ผ๐—ฝ๐—ฒ๐—ป-๐˜„๐—ฒ๐—ถ๐—ด๐—ต๐˜๐˜€ ๐—Ÿ๐—Ÿ๐—  ๐—ฏ๐—ฒ๐—ฎ๐˜๐˜€ ๐—–๐—น๐—ฎ๐˜‚๐—ฑ๐—ฒ-๐Ÿฏ.๐Ÿฑ-๐—ฆ๐—ผ๐—ป๐—ป๐—ฒ๐˜ ๐—ฎ๐—ป๐—ฑ ๐—š๐—ฃ๐—ง-๐Ÿฐ๐—ผ!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@mattshumer", "resource": null, "url": null, "href": null, "user": "mattshumer", "lang": null, "code": null, "label": null }, { "type": "text", "value": ", CEO from Hyperwrite AI, had an idea he wanted to try out: why not fine-tune LLMs to always output their thoughts in specific parts, delineated by <thinking> tags?", "raw": ", CEO from Hyperwrite AI, had an idea he wanted to try out: why not fine-tune LLMs to always output their thoughts in specific parts, delineated by <thinking> tags?", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Even better: inside of that, you could nest other sections, to reflect critically on previous output. Letโ€™s name this part <reflection>. Planning is also put in a separate step.", "raw": "Even better: inside of that, you could nest other sections, to reflect critically on previous output. Letโ€™s name this part <reflection>. 
Planning is also put in a separate step.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "He named the method โ€œReflection tuningโ€ and set out to fine-tune a Llama-3.1-70B with it.", "raw": "He named the method โ€œReflection tuningโ€ and set out to fine-tune a Llama-3.1-70B with it.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Well it turns out, it works mind-boggingly well!", "raw": "Well it turns out, it works mind-boggingly well!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿคฏ Reflection-70B beats GPT-4o, Sonnet-3.5, and even the much bigger Llama-3.1-405B!", "raw": "๐Ÿคฏ Reflection-70B beats GPT-4o, Sonnet-3.5, and even the much bigger Llama-3.1-405B!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐—ง๐—Ÿ;๐——๐—ฅ", "raw": "๐—ง๐—Ÿ;๐——๐—ฅ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸฅŠ This new 70B open-weights model beats GPT-4o, Claude Sonnet, et al.", "raw": "๐ŸฅŠ This new 70B open-weights model beats GPT-4o, Claude Sonnet, et al.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โฐ 405B in training, coming soon", "raw": "โฐ 405B in training, coming soon", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ“š Report coming next week", "raw": "๐Ÿ“š Report coming next week", "resource": null, "url": null, "href": 
null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โš™๏ธ Uses GlaiveAI synthetic data", "raw": "โš™๏ธ Uses GlaiveAI synthetic data", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿค— Available on HF!", "raw": "๐Ÿค— Available on HF!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Iโ€™m starting an Inference Endpoint right now for this model to give it a spin!", "raw": "Iโ€™m starting an Inference Endpoint right now for this model to give it a spin!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Check it out ๐Ÿ‘‰ ", "raw": "Check it out ๐Ÿ‘‰ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/mattshumer/Reflection-Llama-3.1-70B", "resource": { "type": "model", "id": "mattshumer/Reflection-Llama-3.1-70B", "discussionNum": null }, "url": "https://huggingface.co/mattshumer/Reflection-Llama-3.1-70B", "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿคฏ ๐—” ๐—ป๐—ฒ๐˜„ ๐Ÿณ๐Ÿฌ๐—• ๐—ผ๐—ฝ๐—ฒ๐—ป-๐˜„๐—ฒ๐—ถ๐—ด๐—ต๐˜๐˜€ ๐—Ÿ๐—Ÿ๐—  ๐—ฏ๐—ฒ๐—ฎ๐˜๐˜€ ๐—–๐—น๐—ฎ๐˜‚๐—ฑ๐—ฒ-๐Ÿฏ.๐Ÿฑ-๐—ฆ๐—ผ๐—ป๐—ป๐—ฒ๐˜ ๐—ฎ๐—ป๐—ฑ ๐—š๐—ฃ๐—ง-๐Ÿฐ๐—ผ! @mattshumer, CEO from Hyperwrite AI, had an idea he wanted to try out: why not fine-tune LLMs to always output their thoughts in specific parts, delineated by <thinking> tags? Even better: inside of that, you could nest other sections, to reflect critically on previous output. Letโ€™s name this part <reflection>. Planning is also put in a separate step. He named the method โ€œReflection tuningโ€ and set out to fine-tune a Llama-3.1-70B with it. Well it turns out, it works mind-boggingly well! ๐Ÿคฏ Reflection-70B beats GPT-4o, Sonnet-3.5, and even the much bigger Llama-3.1-405B! ๐—ง๐—Ÿ;๐——๐—ฅ ๐ŸฅŠ This new 70B open-weights model beats GPT-4o, Claude Sonnet, et al. โฐ 405B in training, coming soon ๐Ÿ“š Report coming next week โš™๏ธ Uses GlaiveAI synthetic data ๐Ÿค— Available on HF! Iโ€™m starting an Inference Endpoint right now for this model to give it a spin! Check it out ๐Ÿ‘‰ https://huggingface.co/mattshumer/Reflection-Llama-3.1-70B
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 476, "isFollowing": false }
[]
[ { "avatarUrl": "/avatars/821175d73c2ae3ceb28d445963c95722.svg", "fullname": "Matt Shumer", "name": "mattshumer", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 344 } ]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "osanseviero", "DataSoul", "xi0v", "Joseph717171" ], "count": 5 }, { "reaction": "๐Ÿ‘", "users": [ "TahirC", "Yuuru", "iandeanschaefer", "trollek", "Joseph717171" ], "count": 5 }, { "reaction": "๐Ÿค—", "users": [ "louisbrulenaudet", "YaTharThShaRma999", "Joseph717171" ], "count": 3 } ]
2024-09-06T07:40:00.000Z
2024-09-08T19:15:12.418Z
[ { "avatarUrl": "/avatars/1aea33e7602a81f6b6ed98412dda9b41.svg", "fullname": "GR", "name": "gr0010", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63596f9f0cd44992263f2105/4CCZECojd7tkbOxMryiww.png", "fullname": "Trolle Karlsson", "name": "trollek", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 18, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64175bc2b03817ada642291f/V3mhc8Y0saSgXbp--2HcE.png", "fullname": "Kh", "name": "raidhon", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/m-ric/374226305257230
1,912
3
873815932669665
[ { "type": "text", "value": "I'm building an AI for healthcare support for professionals, any advice? I could create a new app here but it needs a lot of trainer (Im newbie in this kind of stuff) ", "raw": "I'm building an AI for healthcare support for professionals, any advice? I could create a new app here but it needs a lot of trainer (Im newbie in this kind of stuff) ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Thank you, guys!!!! ", "raw": "Thank you, guys!!!! ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
I'm building an AI for healthcare support for professionals, any advice? I could create a new app here but it needs a lot of trainer (Im newbie in this kind of stuff) Thank you, guys!!!!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66d41e5566bac2ac7d9460aa/kMulJdATzvrPNMs7pPVRX.png", "fullname": "Lozt B", "name": "messmercod", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66d41e5566bac2ac7d9460aa/bEXFSv9XsukdJHgpTMhuB.webp" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "hemanuelly" ], "count": 2 }, { "reaction": "โค๏ธ", "users": [ "hemanuelly" ], "count": 1 } ]
2024-09-06T05:37:36.000Z
2024-09-06T05:56:44.980Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png", "fullname": "nyuuzyou", "name": "nyuuzyou", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 58, "isFollowing": false } ]
/posts/messmercod/873815932669665
752
1
232920597638334
[ { "type": "text", "value": "I just released Sentence Transformers v3.3.0 & it's huge! 4.5x speedup for CPU with OpenVINO int8 static quantization, training with prompts for a free perf. boost, PEFT integration, evaluation on NanoBEIR, and more! Details:", "raw": "I just released Sentence Transformers v3.3.0 & it's huge! 4.5x speedup for CPU with OpenVINO int8 static quantization, training with prompts for a free perf. boost, PEFT integration, evaluation on NanoBEIR, and more! Details:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "1. We integrate Post-Training Static Quantization using OpenVINO, a very efficient solution for CPUs that processes 4.78x as many texts per second on average, while only hurting performance by 0.36% on average. There's a new ", "raw": "1. We integrate Post-Training Static Quantization using OpenVINO, a very efficient solution for CPUs that processes 4.78x as many texts per second on average, while only hurting performance by 0.36% on average. There's a new ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`export_static_quantized_openvino_model`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "export_static_quantized_openvino_model", "label": null }, { "type": "text", "value": " method to quantize a model.", "raw": " method to quantize a model.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "2. We add the option to train with prompts, e.g. strings like \"query: \", \"search_document: \" or \"Represent this sentence for searching relevant passages: \". It's as simple as using the ", "raw": "2. We add the option to train with prompts, e.g. strings like \"query: \", \"search_document: \" or \"Represent this sentence for searching relevant passages: \". It's as simple as using the ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`prompts`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "prompts", "label": null }, { "type": "text", "value": " argument in ", "raw": " argument in ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`SentenceTransformerTrainingArguments`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "SentenceTransformerTrainingArguments", "label": null }, { "type": "text", "value": ". 
Our experiments show that you can easily reach 0.66% to 0.90% relative performance improvement on NDCG@10 at no extra cost by adding \"query: \" before each training query and \"document: \" before each training answer.", "raw": ". Our experiments show that you can easily reach 0.66% to 0.90% relative performance improvement on NDCG@10 at no extra cost by adding \"query: \" before each training query and \"document: \" before each training answer.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "3. Sentence Transformers now supports training PEFT adapters via 7 new methods for adding new adapters or loading pre-trained ones. You can also directly load a trained adapter with SentenceTransformer as if it's a normal model. Very useful for e.g. 1) training multiple adapters on 1 base model, 2) training bigger models than otherwise possible, or 3) cheaply hosting multiple models by switching multiple adapters on 1 base model.", "raw": "3. Sentence Transformers now supports training PEFT adapters via 7 new methods for adding new adapters or loading pre-trained ones. You can also directly load a trained adapter with SentenceTransformer as if it's a normal model. Very useful for e.g. 1) training multiple adapters on 1 base model, 2) training bigger models than otherwise possible, or 3) cheaply hosting multiple models by switching multiple adapters on 1 base model.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "4. We added easy evaluation on NanoBEIR, a subset of BEIR a.k.a. the MTEB Retrieval benchmark. It contains 13 datasets with 50 queries and up to 10k documents each. Evaluation is fast, and can easily be done during training to track your model's performance on general-purpose information retrieval tasks.", "raw": "4. We added easy evaluation on NanoBEIR, a subset of BEIR a.k.a. the MTEB Retrieval benchmark. It contains 13 datasets with 50 queries and up to 10k documents each. Evaluation is fast, and can easily be done during training to track your model's performance on general-purpose information retrieval tasks.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Additionally, we also deprecate Python 3.8, add better compatibility with Transformers v4.46.0, and more. Read the full release notes here: ", "raw": "Additionally, we also deprecate Python 3.8, add better compatibility with Transformers v4.46.0, and more. 
Read the full release notes here: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/UKPLab/sentence-transformers/releases/tag/v3.3.0", "resource": null, "url": null, "href": "https://github.com/UKPLab/sentence-transformers/releases/tag/v3.3.0", "user": null, "lang": null, "code": null, "label": null } ]
I just released Sentence Transformers v3.3.0 & it's huge! 4.5x speedup for CPU with OpenVINO int8 static quantization, training with prompts for a free perf. boost, PEFT integration, evaluation on NanoBEIR, and more! Details: 1. We integrate Post-Training Static Quantization using OpenVINO, a very efficient solution for CPUs that processes 4.78x as many texts per second on average, while only hurting performance by 0.36% on average. There's a new `export_static_quantized_openvino_model` method to quantize a model. 2. We add the option to train with prompts, e.g. strings like "query: ", "search_document: " or "Represent this sentence for searching relevant passages: ". It's as simple as using the `prompts` argument in `SentenceTransformerTrainingArguments`. Our experiments show that you can easily reach 0.66% to 0.90% relative performance improvement on NDCG@10 at no extra cost by adding "query: " before each training query and "document: " before each training answer. 3. Sentence Transformers now supports training PEFT adapters via 7 new methods for adding new adapters or loading pre-trained ones. You can also directly load a trained adapter with SentenceTransformer as if it's a normal model. Very useful for e.g. 1) training multiple adapters on 1 base model, 2) training bigger models than otherwise possible, or 3) cheaply hosting multiple models by switching multiple adapters on 1 base model. 4. We added easy evaluation on NanoBEIR, a subset of BEIR a.k.a. the MTEB Retrieval benchmark. It contains 13 datasets with 50 queries and up to 10k documents each. Evaluation is fast, and can easily be done during training to track your model's performance on general-purpose information retrieval tasks. Additionally, we also deprecate Python 3.8, add better compatibility with Transformers v4.46.0, and more. Read the full release notes here: https://github.com/UKPLab/sentence-transformers/releases/tag/v3.3.0
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6317233cc92fd6fee317e030/cJHSvvimr1kqgQfHOjO5n.png", "fullname": "Tom Aarsen", "name": "tomaarsen", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1045, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6317233cc92fd6fee317e030/4xE8txYbHbzViuTgbujdQ.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6317233cc92fd6fee317e030/nk4Dnsz-TdImqy5F_pe-N.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6317233cc92fd6fee317e030/bu8ozWM9-O0d5d1KOi80H.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6317233cc92fd6fee317e030/jgOvDirvliwNenv8fX-kf.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6317233cc92fd6fee317e030/SXRbsjVkOyuK0IYaF5F6w.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6317233cc92fd6fee317e030/gKY7Y5w0ZL8cf_IKjnUwE.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "YaTharThShaRma999", "John6666", "iojvsuynv", "Mdubbya", "eriknovak", "den0620", "DmitryRyumin", "csabakecskemeti", "Tanvir1337", "andreagemelli", "praga95", "abdouaziiz", "splevine", "bayang" ], "count": 14 }, { "reaction": "๐Ÿ‘", "users": [ "mlabonne", "m36", "abdouaziiz", "souze8" ], "count": 4 } ]
2024-11-11T12:24:17.000Z
2024-11-11T12:24:33.295Z
[]
/posts/tomaarsen/232920597638334
4,619
0
830550648952715
[ { "type": "text", "value": "Hi everyone, i have been trying to give my chatbots access to the web for a long while now, i have tried using the google search api, taking the links and then scraping them, but it does'nt work that well. does anyone know how you can give a chatbot access to google/the web, so that it has access to current data.", "raw": "Hi everyone, i have been trying to give my chatbots access to the web for a long while now, i have tried using the google search api, taking the links and then scraping them, but it does'nt work that well. does anyone know how you can give a chatbot access to google/the web, so that it has access to current data.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Hi everyone, i have been trying to give my chatbots access to the web for a long while now, i have tried using the google search api, taking the links and then scraping them, but it does'nt work that well. does anyone know how you can give a chatbot access to google/the web, so that it has access to current data.
{ "avatarUrl": "/avatars/7be1913712fdd1ffe75967ed19007720.svg", "fullname": "stock mining", "name": "automatedstockminingorg", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "devenbiz4group", "SOHAIBK1" ], "count": 3 } ]
2024-11-11T06:55:54.000Z
2024-11-12T22:46:51.468Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 384, "isFollowing": false }, { "avatarUrl": "/avatars/7be1913712fdd1ffe75967ed19007720.svg", "fullname": "stock mining", "name": "automatedstockminingorg", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/630d5f5954c3dbd48059e614/x-vcRnBKOFuVjSSWQeuIB.png", "fullname": "Blake Donahoo", "name": "deathstarenterprise", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/automatedstockminingorg/830550648952715
2,550
4
391224485870515
[ { "type": "text", "value": "GRID-6X : Layout for Seamless Image Assembly ๐Ÿ”ฅ", "raw": "GRID-6X : Layout for Seamless Image Assembly ๐Ÿ”ฅ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸชจDemo: ", "raw": "๐ŸชจDemo: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/prithivMLmods/GRID-6X", "resource": { "type": "space", "id": "prithivMLmods/GRID-6X", "discussionNum": null }, "url": "https://huggingface.co/spaces/prithivMLmods/GRID-6X", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸชจDoc / Blog: ", "raw": "๐ŸชจDoc / Blog: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/prithivMLmods/grid-6x", "resource": null, "url": null, "href": "https://huggingface.co/blog/prithivMLmods/grid-6x", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "In the ", "raw": "In the ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`infer`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "infer", "label": null }, { "type": "text", "value": " function:", "raw": " function:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "code_fence", "value": null, "raw": "```python\ngrid_img = Image.new('RGB', (width * grid_size_x, height * grid_size_y))\nfor i, img in enumerate(result.images[:num_images]):\n grid_img.paste(img, (i % grid_size_x * width, i // grid_size_x * height))\n```", "resource": null, "url": null, "href": null, "user": null, "lang": "python", "code": "grid_img = Image.new('RGB', (width * grid_size_x, height * grid_size_y))\nfor i, img in enumerate(result.images[:num_images]):\n grid_img.paste(img, (i % grid_size_x * width, i // grid_size_x * height))", "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "1. **Image Initialization**: ", "raw": "1. 
**Image Initialization**: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`grid_img`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "grid_img", "label": null }, { "type": "text", "value": " is a blank canvas that will hold the images in a grid format.", "raw": " is a blank canvas that will hold the images in a grid format.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "2. **Image Placement**: Images are pasted onto the canvas using a loop:", "raw": "2. **Image Placement**: Images are pasted onto the canvas using a loop:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " - **Horizontal Position**: ", "raw": " - **Horizontal Position**: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`(i % grid_size_x) * width`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "(i % grid_size_x) * width", "label": null }, { "type": "text", "value": " calculates the x-coordinate.", "raw": " calculates the x-coordinate.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " - **Vertical Position**: ", "raw": " - **Vertical Position**: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`(i // grid_size_x) * height`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "(i // grid_size_x) * height", "label": null }, { "type": "text", "value": " calculates the y-coordinate.", "raw": " calculates the y-coordinate.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "1. **Grid Size Selection**: The user selects the grid size from options like \"2x1\", \"1x2\", \"2x2\", \"2x3\", \"3x2\", and \"1x1\". Each option corresponds to the arrangement of images:", "raw": "1. **Grid Size Selection**: The user selects the grid size from options like \"2x1\", \"1x2\", \"2x2\", \"2x3\", \"3x2\", and \"1x1\". 
Each option corresponds to the arrangement of images:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " - **2x1**: 2 images in a single row", "raw": " - **2x1**: 2 images in a single row", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " - **1x2**: 1 image in two rows (column layout)", "raw": " - **1x2**: 1 image in two rows (column layout)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " - **2x2**: 2 rows with 2 images each", "raw": " - **2x2**: 2 rows with 2 images each", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " - **2x3**: 2 rows with 3 images each", "raw": " - **2x3**: 2 rows with 3 images each", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " - **3x2**: 3 rows with 2 images each", "raw": " - **3x2**: 3 rows with 2 images each", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " - **1x1**: A single image (default)", "raw": " - **1x1**: A single image (default)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "2. **Image Generation**: Based on the grid size selection, the app calculates the number of images to generate. For example:", "raw": "2. **Image Generation**: Based on the grid size selection, the app calculates the number of images to generate. 
For example:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " - If the grid size is \"2x2\", the app generates 4 images.", "raw": " - If the grid size is \"2x2\", the app generates 4 images.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " - For \"3x2\", it generates 6 images.", "raw": " - For \"3x2\", it generates 6 images.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "-> Each option arranges images accordingly, providing flexibility in viewing multiple images in one output.", "raw": "-> Each option arranges images accordingly, providing flexibility in viewing multiple images in one output.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "-> Added both of these spaces that support the GRID functionality Layout for Seamless Image Assembly : ", "raw": "-> Added both of these spaces that support the GRID functionality Layout for Seamless Image Assembly : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "----------", "raw": "----------", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ”ฅIMAGINEO-4K: ", "raw": "๐Ÿ”ฅIMAGINEO-4K: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/prithivMLmods/IMAGINEO-4K", "resource": { "type": "space", "id": "prithivMLmods/IMAGINEO-4K", "discussionNum": null }, "url": "https://huggingface.co/spaces/prithivMLmods/IMAGINEO-4K", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": 
"new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ”ฅGRID-6X: ", "raw": "๐Ÿ”ฅGRID-6X: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/prithivMLmods/GRID-6X", "resource": { "type": "space", "id": "prithivMLmods/GRID-6X", "discussionNum": null }, "url": "https://huggingface.co/spaces/prithivMLmods/GRID-6X", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "----------", "raw": "----------", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": ".", "raw": ".", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": ".", "raw": ".", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": ".@prithivMLmods ๐Ÿค—", "raw": ".@prithivMLmods ๐Ÿค—", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
GRID-6X : Layout for Seamless Image Assembly ๐Ÿ”ฅ ๐ŸชจDemo: https://huggingface.co/spaces/prithivMLmods/GRID-6X ๐ŸชจDoc / Blog: https://huggingface.co/blog/prithivMLmods/grid-6x In the `infer` function: ```python grid_img = Image.new('RGB', (width * grid_size_x, height * grid_size_y)) for i, img in enumerate(result.images[:num_images]): grid_img.paste(img, (i % grid_size_x * width, i // grid_size_x * height)) ``` 1. **Image Initialization**: `grid_img` is a blank canvas that will hold the images in a grid format. 2. **Image Placement**: Images are pasted onto the canvas using a loop: - **Horizontal Position**: `(i % grid_size_x) * width` calculates the x-coordinate. - **Vertical Position**: `(i // grid_size_x) * height` calculates the y-coordinate. 1. **Grid Size Selection**: The user selects the grid size from options like "2x1", "1x2", "2x2", "2x3", "3x2", and "1x1". Each option corresponds to the arrangement of images: - **2x1**: 2 images in a single row - **1x2**: 1 image in two rows (column layout) - **2x2**: 2 rows with 2 images each - **2x3**: 2 rows with 3 images each - **3x2**: 3 rows with 2 images each - **1x1**: A single image (default) 2. **Image Generation**: Based on the grid size selection, the app calculates the number of images to generate. For example: - If the grid size is "2x2", the app generates 4 images. - For "3x2", it generates 6 images. -> Each option arranges images accordingly, providing flexibility in viewing multiple images in one output. -> Added both of these spaces that support the GRID functionality Layout for Seamless Image Assembly : ---------- ๐Ÿ”ฅIMAGINEO-4K: https://huggingface.co/spaces/prithivMLmods/IMAGINEO-4K ๐Ÿ”ฅGRID-6X: https://huggingface.co/spaces/prithivMLmods/GRID-6X ---------- . . .@prithivMLmods ๐Ÿค—
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg", "fullname": "Prithiv Sakthi", "name": "prithivMLmods", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 342, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/EqV9AAkxkczs083HRrS9a.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/8D3JFzobHsM5d7MDvI1eM.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/2hMMo763UIrjElHrVHq9B.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/z9vZQwdEMXWyiC4HhE1w5.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/yTI4MWizFc0E04kNo7XnU.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/uaqIIbkW3XZufHbuY9B4s.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/UZq_AZZbQ1OSyHqOWrMlo.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/wJjpXujFjKeDWKocbQ6vW.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/ipspwn9DRNH6dS4IInhC2.webp" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "rdrede", "darksfx", "hypergod", "civet789", "prithivMLmods", "Ngrthm", "RenderIo" ], "count": 7 }, { "reaction": "โค๏ธ", "users": [ "rdrede", "ai4life44", "civet789", "Ngrthm" ], "count": 4 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "rdrede", "Danis146" ], "count": 3 }, { "reaction": "๐Ÿ˜Ž", "users": [ "Vexia", "RenderIo" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "Kasnol" ], "count": 1 } ]
2024-11-11T05:55:50.000Z
2024-11-11T08:05:19.661Z
[]
/posts/prithivMLmods/391224485870515
3,966
0
684381844303681
[ { "type": "text", "value": "#EMNLP2024 is happening soon! Unfortunately, I will not be on site, but I will present our poster virtually on Wednesday, Nov 13 (7:45 EST / 13:45 CEST) in Virtual Poster Session 2.", "raw": "#EMNLP2024 is happening soon! Unfortunately, I will not be on site, but I will present our poster virtually on Wednesday, Nov 13 (7:45 EST / 13:45 CEST) in Virtual Poster Session 2.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "In this work, we leverage self-training in an active learning loop in order to train small language models with even less data. Hope to see you there!", "raw": "In this work, we leverage self-training in an active learning loop in order to train small language models with even less data. Hope to see you there!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
#EMNLP2024 is happening soon! Unfortunately, I will not be on site, but I will present our poster virtually on Wednesday, Nov 13 (7:45 EST / 13:45 CEST) in Virtual Poster Session 2. In this work, we leverage self-training in an active learning loop in order to train small language models with even less data. Hope to see you there!
{ "avatarUrl": "/avatars/f32291df2054c1bb4a01889d1b41c0d5.svg", "fullname": "Christopher Schrรถder", "name": "cschroeder", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 17, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/625026749d39e8be3166132f/wnzWVPQYkfpaANZ1AOqig.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 }, { "reaction": "๐Ÿ‘", "users": [ "mrlive01" ], "count": 1 } ]
2024-11-10T22:49:02.000Z
2024-11-10T22:53:54.315Z
[ { "avatarUrl": "/avatars/f32291df2054c1bb4a01889d1b41c0d5.svg", "fullname": "Christopher Schrรถder", "name": "cschroeder", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 17, "isFollowing": false } ]
/posts/cschroeder/684381844303681
657
1
555944473411068
[ { "type": "text", "value": "Hello everyone!!! I am new to this and a little out of my depth (aLOT out of my depth!! LOL!) I am looking through the site and wanted to ask if there were any quailty primers i should read? or a good basic getting started post?", "raw": "Hello everyone!!! I am new to this and a little out of my depth (aLOT out of my depth!! LOL!) I am looking through the site and wanted to ask if there were any quailty primers i should read? or a good basic getting started post?", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Thanks in advance!!", "raw": "Thanks in advance!!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Hello everyone!!! I am new to this and a little out of my depth (aLOT out of my depth!! LOL!) I am looking through the site and wanted to ask if there were any quailty primers i should read? or a good basic getting started post? Thanks in advance!!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/672e54a15bd9344d447932b8/iuCOjkLJyETdg7abe_NQt.jpeg", "fullname": "Jonathan Payton", "name": "SaRaHAI2024", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 }, { "reaction": "๐Ÿ‘", "users": [ "mrlive01" ], "count": 1 } ]
2024-11-10T13:35:05.000Z
2024-11-12T22:54:51.149Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 384, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6593502ca2607099284523db/13IfQE8qnJsjPXbOeGrLa.jpeg", "fullname": "william marshall", "name": "fuzzy-mittenz", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 16, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/630d5f5954c3dbd48059e614/x-vcRnBKOFuVjSSWQeuIB.png", "fullname": "Blake Donahoo", "name": "deathstarenterprise", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/SaRaHAI2024/555944473411068
824
3
814515366696776
[ { "type": "text", "value": "Style flo : : ๐ŸŽ‰๐Ÿค—", "raw": "Style flo : : ๐ŸŽ‰๐Ÿค—", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "{ Try Now on Flux LoRA DLC โ›ต } : ", "raw": "{ Try Now on Flux LoRA DLC โ›ต } : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC", "resource": { "type": "space", "id": "prithivMLmods/FLUX-LoRA-DLC", "discussionNum": null }, "url": "https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "-- Undersea", "raw": "-- Undersea", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "{ Red Fluid } : ", "raw": "{ Red Fluid } : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/prithivMLmods/Red-Undersea-Flux-LoRA", "resource": { "type": "model", "id": "prithivMLmods/Red-Undersea-Flux-LoRA", "discussionNum": null }, "url": "https://huggingface.co/prithivMLmods/Red-Undersea-Flux-LoRA", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "-- 3D Realmix", "raw": "-- 3D Realmix", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "{ 3D Portrait Render } : ", "raw": "{ 3D Portrait Render } : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/prithivMLmods/3D-Render-Flux-LoRA", "resource": { "type": "model", "id": "prithivMLmods/3D-Render-Flux-LoRA", "discussionNum": null }, "url": "https://huggingface.co/prithivMLmods/3D-Render-Flux-LoRA", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": 
"new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "-- Pop", "raw": "-- Pop", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "{ Yellow Pop } : ", "raw": "{ Yellow Pop } : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/prithivMLmods/Yellow-Pop-Flux-Dev-LoRA", "resource": { "type": "model", "id": "prithivMLmods/Yellow-Pop-Flux-Dev-LoRA", "discussionNum": null }, "url": "https://huggingface.co/prithivMLmods/Yellow-Pop-Flux-Dev-LoRA", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "-- Grid", "raw": "-- Grid", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "{ Purple Grid } : ", "raw": "{ Purple Grid } : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/prithivMLmods/Purple-Grid-Flux-LoRA", "resource": { "type": "model", "id": "prithivMLmods/Purple-Grid-Flux-LoRA", "discussionNum": null }, "url": "https://huggingface.co/prithivMLmods/Purple-Grid-Flux-LoRA", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "{ collections : : }", "raw": "{ collections : : }", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿš€ Flux LoRA : ", "raw": "๐Ÿš€ Flux LoRA : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/prithivMLmods/flux-lora-collections-66dd5908be2206cfaa8519be", "resource": { "type": "collection", "id": "prithivMLmods/flux-lora-collections-66dd5908be2206cfaa8519be", "discussionNum": null }, 
"url": "https://huggingface.co/collections/prithivMLmods/flux-lora-collections-66dd5908be2206cfaa8519be", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿš€Collection zero: ", "raw": "๐Ÿš€Collection zero: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/prithivMLmods/collection-zero-and-demo-recently-updated-65e48a7dd8212873836ceca2", "resource": { "type": "collection", "id": "prithivMLmods/collection-zero-and-demo-recently-updated-65e48a7dd8212873836ceca2", "discussionNum": null }, "url": "https://huggingface.co/collections/prithivMLmods/collection-zero-and-demo-recently-updated-65e48a7dd8212873836ceca2", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": ".", "raw": ".", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": ".", "raw": ".", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@prithivMLmods", "resource": null, "url": null, "href": null, "user": "prithivMLmods", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ๐Ÿงจ", "raw": " ๐Ÿงจ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Style flo : : 🎉🤗 { Try Now on Flux LoRA DLC ⛵ } : https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC -- Undersea { Red Fluid } : https://huggingface.co/prithivMLmods/Red-Undersea-Flux-LoRA -- 3D Realmix { 3D Portrait Render } : https://huggingface.co/prithivMLmods/3D-Render-Flux-LoRA -- Pop { Yellow Pop } : https://huggingface.co/prithivMLmods/Yellow-Pop-Flux-Dev-LoRA -- Grid { Purple Grid } : https://huggingface.co/prithivMLmods/Purple-Grid-Flux-LoRA { collections : : } 🚀 Flux LoRA : https://huggingface.co/collections/prithivMLmods/flux-lora-collections-66dd5908be2206cfaa8519be 🚀 Collection zero: https://huggingface.co/collections/prithivMLmods/collection-zero-and-demo-recently-updated-65e48a7dd8212873836ceca2 . . @prithivMLmods 🧨
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg", "fullname": "Prithiv Sakthi", "name": "prithivMLmods", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 342, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/-DyKl43T-TpHkgeetSHsT.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/-N9wPAmBH8NchvNffwmy1.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/Ldwdv98h1rXbnm78XLvJ6.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/rE7age0TSScDczjz9eiZn.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/73O6ddYsl2LMsIsxsaEry.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/5jxCKlF4pcE3QeIMXWdLd.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/f1U38j2TwOjBWlizFZxIM.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/2o8v9Ue5K2gK_xJNrd_7Y.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/61b5RND-CIhZ8Lg2bC5S2.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/SrIl2CPUOyoUwHdyR-V-Q.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/n_DOu43Qpgrx3i_b8wra1.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/Cw16G8bTba1JrIDtU3c9J.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg", "fullname": "Prithiv Sakthi", "name": "prithivMLmods", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 342 } ]
[ { "reaction": "โค๏ธ", "users": [ "darksfx", "hypergod", "ai4life44", "rdrede", "John6666", "saitamaaa", "seko2024", "AtAndDev", "prithivMLmods", "louisbrulenaudet", "edgar222", "Ngrthm", "RenderIo" ], "count": 13 }, { "reaction": "๐Ÿ‘", "users": [ "darksfx", "ai4life44", "seko2024", "AtAndDev", "cyberdioxide", "Ngrthm", "RenderIo" ], "count": 7 }, { "reaction": "๐Ÿ”ฅ", "users": [ "ai4life44", "rdrede", "AtAndDev", "cyberdioxide", "SOHAIBK1" ], "count": 5 }, { "reaction": "๐Ÿš€", "users": [ "hypergod", "John6666", "AtAndDev" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "TroglodyteDerivations", "AtAndDev", "RenderIo" ], "count": 3 }, { "reaction": "๐Ÿค", "users": [ "RenderIo" ], "count": 1 } ]
2024-11-10T05:41:37.000Z
2024-11-10T13:07:35.942Z
[]
/posts/prithivMLmods/814515366696776
4,830
0
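For readers who want to try one of the LoRAs listed above outside the demo Space, a minimal diffusers sketch along these lines should work. The base checkpoint (black-forest-labs/FLUX.1-dev), the prompt, and the sampling settings are assumptions rather than details from the post; check each model card for its trigger words and recommended steps.

```python
# Hedged sketch: applying one of the listed Flux LoRAs with diffusers.
# Base model, prompt, and sampler settings are illustrative assumptions;
# consult the individual model cards for trigger words and recommended settings.
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",       # assumed base checkpoint (gated repo)
    torch_dtype=torch.bfloat16,
)
pipe.load_lora_weights("prithivMLmods/Purple-Grid-Flux-LoRA")
pipe.enable_model_cpu_offload()           # helps on GPUs with limited VRAM

image = pipe(
    prompt="a neon purple grid cityscape, portrait of a woman",  # illustrative prompt
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("purple_grid_sample.png")
```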
735753662688458
[ { "type": "text", "value": "Pleased to announce Cat1.0, the newest iteration of my roleplay fine-tunes! ", "raw": "Pleased to announce Cat1.0, the newest iteration of my roleplay fine-tunes! ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/rwitz/cat1.0", "resource": { "type": "model", "id": "rwitz/cat1.0", "discussionNum": null }, "url": "https://huggingface.co/rwitz/cat1.0", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "The model is fine-tuned from Llama-3.1 8B on VERY high quality roleplay chat logs, each stretching for thousands of tokens. Also excels at logic, especially in conversational reasoning! Feel free to give it a test!", "raw": "The model is fine-tuned from Llama-3.1 8B on VERY high quality roleplay chat logs, each stretching for thousands of tokens. Also excels at logic, especially in conversational reasoning! Feel free to give it a test!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Pleased to announce Cat1.0, the newest iteration of my roleplay fine-tunes! https://huggingface.co/rwitz/cat1.0 The model is fine-tuned from Llama-3.1 8B on VERY high-quality roleplay chat logs, each stretching for thousands of tokens. It also excels at logic, especially in conversational reasoning! Feel free to give it a test!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6713b2f6d1cf4a8ca4a3c856/qYPwQM9FyCkR8aU2ZtjoK.jpeg", "fullname": "Ryan Witzman", "name": "rwitz", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6713b2f6d1cf4a8ca4a3c856/VOk8nKVjF1rL3GhLVgiN-.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6713b2f6d1cf4a8ca4a3c856/uhox-SQbEeRBeprBDKnE2.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6713b2f6d1cf4a8ca4a3c856/kD2LDcQPSV91U_02lvn8S.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6713b2f6d1cf4a8ca4a3c856/ufpFY4Hoj2HvXCurNZTaU.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6713b2f6d1cf4a8ca4a3c856/D2XtzAE2Av3A5fVQ0fBKg.png" } ]
[]
[ { "reaction": "โค๏ธ", "users": [ "rwitz", "John6666", "bluecolarbear", "ai-everyday", "umair894" ], "count": 5 }, { "reaction": "๐Ÿ‘", "users": [ "mrlive01" ], "count": 1 } ]
2024-11-10T00:28:35.000Z
2024-11-11T17:47:37.418Z
[ { "avatarUrl": "/avatars/c871460e7e5ccfb7b2f30834e70289cd.svg", "fullname": "Ankur Gargi", "name": "agargi", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6713b2f6d1cf4a8ca4a3c856/qYPwQM9FyCkR8aU2ZtjoK.jpeg", "fullname": "Ryan Witzman", "name": "rwitz", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false } ]
/posts/rwitz/735753662688458
2,642
2
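For anyone who wants to give the model a quick local test, a sketch like the following should work with a recent transformers release. The system prompt, sampling settings, and hardware assumptions (enough VRAM for an 8B model in bfloat16) are illustrative, and it assumes the repo ships a Llama-3.1-style chat template.

```python
# Hedged sketch: a quick local chat test of rwitz/cat1.0 with transformers.
# Assumes the repo provides a chat template and that an 8B model fits in
# bfloat16 on your GPU; quantize or offload otherwise.
import torch
from transformers import pipeline

chat = pipeline(
    "text-generation",
    model="rwitz/cat1.0",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

messages = [
    {"role": "system", "content": "You are a sarcastic tavern keeper in a fantasy city."},
    {"role": "user", "content": "A hooded stranger asks you about the ruined tower to the north."},
]
out = chat(messages, max_new_tokens=256, do_sample=True, temperature=0.8)
# With chat-style input, generated_text holds the full conversation; take the last turn.
print(out[0]["generated_text"][-1]["content"])
```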
243454344307656
[ { "type": "text", "value": "How To Use Mochi 1 Open Source Video Generation Model On Your Windows PC, RunPod and Massed Compute", "raw": "How To Use Mochi 1 Open Source Video Generation Model On Your Windows PC, RunPod and Massed Compute", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Tutorial Link : ", "raw": "Tutorial Link : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://youtu.be/iqBV7bCbDJY", "resource": null, "url": null, "href": "https://youtu.be/iqBV7bCbDJY", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Mochi 1 from Genmo is the newest state-of-the-art Open Source video generation model that you can use for free on your computer. This model is a breakthrough like the very first Stable Diffusion model but this time it is starting for the video generation models. In this tutorial, I am going to show you how to use Genmo Mochi 1 video generation model on your computer, on windows, locally with the most advanced and very easy to use SwarmUI. SwarmUI as fast as ComfyUI but also as easy as using Automatic1111 Stable Diffusion web UI. Moreover, if you donโ€™t have a powerful GPU to run this model locally, I am going to show you how to use this model on the best cloud providers RunPod and Massed Compute.", "raw": "Mochi 1 from Genmo is the newest state-of-the-art Open Source video generation model that you can use for free on your computer. This model is a breakthrough like the very first Stable Diffusion model but this time it is starting for the video generation models. In this tutorial, I am going to show you how to use Genmo Mochi 1 video generation model on your computer, on windows, locally with the most advanced and very easy to use SwarmUI. SwarmUI as fast as ComfyUI but also as easy as using Automatic1111 Stable Diffusion web UI. 
Moreover, if you donโ€™t have a powerful GPU to run this model locally, I am going to show you how to use this model on the best cloud providers RunPod and Massed Compute.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ”— Public Open Access Article Used in Video โคต๏ธ", "raw": "๐Ÿ”— Public Open Access Article Used in Video โคต๏ธ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โ–ถ๏ธ ", "raw": "โ–ถ๏ธ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/106135985", "resource": null, "url": null, "href": "https://www.patreon.com/posts/106135985", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Amazing Ultra Important Tutorials with Chapters and Manually Written Subtitles / Captions", "raw": "Amazing Ultra Important Tutorials with Chapters and Manually Written Subtitles / Captions", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Stable Diffusion 3.5 Large How To Use Tutorial With Best Configuration and Comparison With FLUX DEV : ", "raw": "Stable Diffusion 3.5 Large How To Use Tutorial With Best Configuration and Comparison With FLUX DEV : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://youtu.be/-zOKhoO9a5s", "resource": null, "url": null, "href": "https://youtu.be/-zOKhoO9a5s", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "FLUX Full Fine-Tuning / DreamBooth Tutorial That Shows A Lot Info Regarding SwarmUI Latest : ", "raw": "FLUX Full Fine-Tuning / DreamBooth Tutorial That Shows A Lot Info Regarding SwarmUI Latest : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://youtu.be/FvpWy1x5etM", "resource": null, "url": null, "href": "https://youtu.be/FvpWy1x5etM", "user": null, "lang": null, "code": null, "label": null }, 
{ "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Full FLUX Tutorial โ€” FLUX Beats Midjourney for Real : ", "raw": "Full FLUX Tutorial โ€” FLUX Beats Midjourney for Real : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://youtu.be/bupRePUOA18", "resource": null, "url": null, "href": "https://youtu.be/bupRePUOA18", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Main Windows SwarmUI Tutorial (Watch To Learn How to Use)", "raw": "Main Windows SwarmUI Tutorial (Watch To Learn How to Use)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "How to install and use. You have to watch this to learn how to use SwarmUI", "raw": "How to install and use. You have to watch this to learn how to use SwarmUI", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Has 70 chapters and manually fixed captions : ", "raw": "Has 70 chapters and manually fixed captions : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://youtu.be/HKX8_F1Er_w", "resource": null, "url": null, "href": "https://youtu.be/HKX8_F1Er_w", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
How To Use Mochi 1 Open Source Video Generation Model On Your Windows PC, RunPod and Massed Compute Tutorial Link : https://youtu.be/iqBV7bCbDJY Mochi 1 from Genmo is the newest state-of-the-art open-source video generation model that you can use for free on your computer. This model is a breakthrough like the very first Stable Diffusion model, but this time it is the starting point for video generation models. In this tutorial, I am going to show you how to use the Genmo Mochi 1 video generation model on your computer, on Windows, locally with the most advanced and very easy to use SwarmUI. SwarmUI is as fast as ComfyUI but as easy to use as the Automatic1111 Stable Diffusion web UI. Moreover, if you don't have a powerful GPU to run this model locally, I am going to show you how to use this model on the best cloud providers, RunPod and Massed Compute. 🔗 Public Open Access Article Used in Video ⤵️ ▶️ https://www.patreon.com/posts/106135985 Amazing Ultra Important Tutorials with Chapters and Manually Written Subtitles / Captions Stable Diffusion 3.5 Large How To Use Tutorial With Best Configuration and Comparison With FLUX DEV : https://youtu.be/-zOKhoO9a5s FLUX Full Fine-Tuning / DreamBooth Tutorial That Shows A Lot of Info Regarding the Latest SwarmUI : https://youtu.be/FvpWy1x5etM Full FLUX Tutorial - FLUX Beats Midjourney for Real : https://youtu.be/bupRePUOA18 Main Windows SwarmUI Tutorial (Watch To Learn How to Use) How to install and use. You have to watch this to learn how to use SwarmUI. Has 70 chapters and manually fixed captions : https://youtu.be/HKX8_F1Er_w
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png", "fullname": "Furkan Gรถzรผkara", "name": "MonsterMMORPG", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 368, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/aLyqQrljI87irIClM1Z91.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/_c8WhQ0swW7FaFG-lZ9vF.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Vbl27cPvgnZlTeF6QKRzi.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/-YOirgjpKSsBG-zuVMq_s.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/C1b8c04aTzx0tlpAgylit.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/UbYO09mGInvtFpoXpoN56.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/AwTHLQWqzZQa7Rxcvw2Gk.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/8yhyZ4xUrgnH33mWC33uA.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/kkg8GJwuFGIhnlyz4wrwl.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Z0yn2TqvZxobe76MsIrud.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/_vlxNRIxNJICoq4_IkWiX.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/7qQRuMZn0_BdQpASo9cQA.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/01F4qw0xiUko0VCBebyYQ.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/wPlmDVdan5NgwKeAv9MlT.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/UBgk0syyojYaJMLk6NF7r.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/ri6etCegw0HWpOITD90XE.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/4ks069m3SDWyBRYg4B2hL.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/SGh_YiZmThm0ORCD-bLHy.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/dXN7U2_DVCwKoae-lLvAj.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/S2W6TkHv-0JJXl7JN9HQ9.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/SBwMa2G9lMxBMbXP3fTOW.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/snppzUFEsnlkTTt1a-y6q.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/-9aHzuaLnDxC-HQLzpYRX.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/hOJEC_OaGY3KbOSbbLpkX.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/aCA5Njptjzmm9QLSpOSGK.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/fQEWgUl8hKDtg7C8ABH88.webp" }, { "type": 
"image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/ffAb6zpGVTkhaTO0hy8bj.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/8DC-EhmDpKfUAHdhYpMtI.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/dm7-vRz4PdvUAUnon_kCG.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/gbqUE51oMA_sNXSPWYukh.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/zAc4LE6M0bD3bXUWwo_n6.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Yn_TsaBtxHJS63PlasFDJ.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/_wUWi1eeOipFbzOLGZOgl.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/yPrKj5mPriUid00eo3xEM.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/USC2hGZ5iRxQds7DQt_Gb.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/ELjOjADM3T01zPNPUnV44.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/7QZU7WTLed_zWgiB5uGgm.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/EnKQbl0zRR4aXDlxmhZ-2.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/ZlfvjviK3R49ElJMKEl4q.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/5S2QDxGEGbZoiIDIElqRN.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/FBPlnR4-cTGE-vzQswO7M.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/HXBbjfLlXNczfm6PCUZki.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/3bUhcLwUmQQwE_l1oX2Z0.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/xe1qP8xliH-W1z_X2WD1b.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/yb45XhlAn0FDmt4GTOXbS.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Oasoy9N2J4wtRU2H_XBc4.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/rRhM53KtPybpzvJTKHcLq.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/3LaM063fIX8ZDTirEgfHi.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Tyr5Jm1GaqekZT-0zxCCH.webp" } ]
[]
[ { "reaction": "โค๏ธ", "users": [ "MonsterMMORPG", "DIEUDONNE972", "Svngoku", "aleph65", "andmev", "Yingchaooo", "mikeehren" ], "count": 7 }, { "reaction": "๐Ÿ‘", "users": [ "MonsterMMORPG", "Latyrine", "EX4L", "mrlive01" ], "count": 4 }, { "reaction": "๐Ÿ‘€", "users": [ "MonsterMMORPG", "John6666", "DIEUDONNE972" ], "count": 3 }, { "reaction": "๐Ÿ˜Ž", "users": [ "MonsterMMORPG", "Pent", "DIEUDONNE972" ], "count": 3 }, { "reaction": "๐Ÿ”ฅ", "users": [ "MonsterMMORPG", "Mrdesigner14" ], "count": 2 }, { "reaction": "โž•", "users": [ "MonsterMMORPG", "rose-omer" ], "count": 2 }, { "reaction": "๐Ÿš€", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿค—", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿง ", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿค", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿคฏ", "users": [ "MonsterMMORPG" ], "count": 1 } ]
2024-11-09T23:34:00.000Z
2024-11-09T23:34:00.374Z
[]
/posts/MonsterMMORPG/243454344307656
3,860
0
458878801619459
[ { "type": "text", "value": "๐Ÿ“ข Have you ever been wondered how specifically Transformers were capable for handling long input contexts? ", "raw": "๐Ÿ“ข Have you ever been wondered how specifically Transformers were capable for handling long input contexts? ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I got a chance to tackle this through long document texts summarization problem, and delighted to share the related survey and diagram for a quick skimming below:", "raw": "I got a chance to tackle this through long document texts summarization problem, and delighted to share the related survey and diagram for a quick skimming below:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Preprint ๐Ÿ“ ", "raw": "Preprint ๐Ÿ“ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://nicolay-r.github.io/website/data/preprint-AINL_2023_longt5_summarization.pdf", "resource": null, "url": null, "href": "https://nicolay-r.github.io/website/data/preprint-AINL_2023_longt5_summarization.pdf", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Springer ๐Ÿ“ ", "raw": "Springer ๐Ÿ“ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://link.springer.com/article/10.1007/s10958-024-07435-z", "resource": null, "url": null, "href": "https://link.springer.com/article/10.1007/s10958-024-07435-z", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸŽฏ The aim of the survey was the development of the long-document summarizer for mass-media news in Vietnamese language. ๐Ÿ‡ป๐Ÿ‡ณ ", "raw": "๐ŸŽฏ The aim of the survey was the development of the long-document summarizer for mass-media news in Vietnamese language. 
๐Ÿ‡ป๐Ÿ‡ณ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Sharing for a quick skimming of the methods performance overview of various LM-based solution across several datasets, covering domain-oriented advances in Vietnamese language (see attached screenshots)", "raw": "Sharing for a quick skimming of the methods performance overview of various LM-based solution across several datasets, covering domain-oriented advances in Vietnamese language (see attached screenshots)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "As for solution we consider:", "raw": "As for solution we consider:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โ˜‘๏ธ 1. Adapt existed ", "raw": "โ˜‘๏ธ 1. Adapt existed ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/google/pegasus-cnn_dailymail", "resource": { "type": "model", "id": "google/pegasus-cnn_dailymail", "discussionNum": null }, "url": "https://huggingface.co/google/pegasus-cnn_dailymail", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " for summarizing large dataset for arranging training", "raw": " for summarizing large dataset for arranging training", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โ˜‘๏ธ 2. Tuning ", "raw": "โ˜‘๏ธ 2. 
Tuning ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/google/long-t5-tglobal-large", "resource": { "type": "model", "id": "google/long-t5-tglobal-large", "discussionNum": null }, "url": "https://huggingface.co/google/long-t5-tglobal-large", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " suitable for performing generative summarization.", "raw": " suitable for performing generative summarization.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Implementation details:", "raw": "Implementation details:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸŒŸ ", "raw": "๐ŸŒŸ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/ViLongT5", "resource": null, "url": null, "href": "https://github.com/nicolay-r/ViLongT5", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "(Simplier to go with huggingface rather flaxformer that so far become a legacy engine)", "raw": "(Simplier to go with huggingface rather flaxformer that so far become a legacy engine)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
📢 Have you ever wondered how exactly Transformers are able to handle long input contexts? I got a chance to tackle this through the long-document text summarization problem, and I am delighted to share the related survey and diagram for a quick skim below: Preprint 📝 https://nicolay-r.github.io/website/data/preprint-AINL_2023_longt5_summarization.pdf Springer 📝 https://link.springer.com/article/10.1007/s10958-024-07435-z 🎯 The aim of the survey was the development of a long-document summarizer for mass-media news in the Vietnamese language. 🇻🇳 Sharing a quick overview of the performance of various LM-based solutions across several datasets, covering domain-oriented advances in the Vietnamese language (see attached screenshots). As for the solution, we consider: ☑️ 1. Adapting the existing https://huggingface.co/google/pegasus-cnn_dailymail to summarize a large dataset for arranging training ☑️ 2. Tuning https://huggingface.co/google/long-t5-tglobal-large, which is suitable for generative summarization. Implementation details: 🌟 https://github.com/nicolay-r/ViLongT5 (Simpler to go with Hugging Face rather than flaxformer, which has by now become a legacy engine)
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/EWA9fsZMdRnoIP4a127Yu.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/bNvst3E_RtUZvEM-Tn9l3.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/0dOc4m__Kvh_KDQQ0lSjG.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 }, { "reaction": "๐Ÿ‘", "users": [ "BuiDoan" ], "count": 1 } ]
2024-11-09T19:51:01.000Z
2024-11-09T20:04:14.712Z
[]
/posts/nicolay-r/458878801619459
680
0
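To make the two-step recipe from the post above concrete, here is a rough, hedged sketch of the inference side in transformers: PEGASUS producing silver summaries and LongT5 as the long-input generator. The file path, truncation lengths, and generation settings are placeholders, the PEGASUS checkpoint is English-only (the paper targets Vietnamese), and the LongT5 output is only meaningful after the fine-tuning described in the post.

```python
# Rough sketch of the two-step recipe described in the post (illustrative only):
# 1) use the off-the-shelf PEGASUS summarizer to produce "silver" summaries for a
#    large unlabeled corpus, 2) fine-tune LongT5 on those (document, summary) pairs.
# File path, truncation lengths, and generation settings are assumptions.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

with open("article.txt", encoding="utf-8") as f:   # placeholder input document
    long_article = f.read()

# Step 1: silver summary from the existing abstractive summarizer (English-only).
silver = pipeline("summarization", model="google/pegasus-cnn_dailymail")
silver_summary = silver(long_article, truncation=True,
                        max_length=128, min_length=32)[0]["summary_text"]

# Step 2: LongT5 accepts much longer inputs (transient-global attention), so the
# generative summarizer is fine-tuned from this checkpoint on the silver pairs.
tok = AutoTokenizer.from_pretrained("google/long-t5-tglobal-large")
model = AutoModelForSeq2SeqLM.from_pretrained("google/long-t5-tglobal-large")
inputs = tok(long_article, truncation=True, max_length=16384, return_tensors="pt")
ids = model.generate(**inputs, max_new_tokens=128)  # meaningful only after fine-tuning
print(tok.decode(ids[0], skip_special_tokens=True))
```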
602093405820721
[ { "type": "text", "value": "Last Week in Medical AI: Top Research ", "raw": "Last Week in Medical AI: Top Research ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Papers/Models", "raw": "Papers/Models", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " ๐Ÿ”ฅ ", "raw": " ๐Ÿ”ฅ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "(November 2 -November 9, 2024)", "raw": "(November 2 -November 9, 2024)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ… Medical AI Paper of the Week:", "raw": "๐Ÿ… Medical AI Paper of the Week:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Exploring Large Language Models for Specialist-level Oncology Care", "raw": "Exploring Large Language Models for Specialist-level Oncology Care", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Medical LLM & Other Models:", "raw": "Medical LLM & Other Models:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- GSCo: Generalist-Specialist AI Collaboration", "raw": "- GSCo: Generalist-Specialist AI Collaboration", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- PediatricsGPT: Chinese Pediatric Assistant", "raw": "- PediatricsGPT: Chinese Pediatric Assistant", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- MEG: Knowledge-Enhanced Medical QA", "raw": "- MEG: Knowledge-Enhanced Medical QA", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", 
"value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- AutoProteinEngine: Multimodal Protein LLM", "raw": "- AutoProteinEngine: Multimodal Protein LLM", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Frameworks and Methodologies:", "raw": "Frameworks and Methodologies:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- BrainSegFounder: 3D Neuroimage Analysis", "raw": "- BrainSegFounder: 3D Neuroimage Analysis", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- PASSION: Sub-Saharan Dermatology Dataset", "raw": "- PASSION: Sub-Saharan Dermatology Dataset", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- SAM for Lung X-ray Segmentation", "raw": "- SAM for Lung X-ray Segmentation", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Label Critic: Data-First Approach", "raw": "- Label Critic: Data-First Approach", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Medprompt Runtime Strategies", "raw": "- Medprompt Runtime Strategies", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Medical LLM Applications:", "raw": "Medical LLM Applications:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- CataractBot: Patient Support System", "raw": "- CataractBot: Patient Support System", "resource": null, "url": null, "href": 
null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- CheX-GPT: X-ray Report Enhancement", "raw": "- CheX-GPT: X-ray Report Enhancement", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- CardioAI: Cancer Cardiotoxicity Monitor", "raw": "- CardioAI: Cancer Cardiotoxicity Monitor", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- HealthQ: Healthcare Conversation Chain", "raw": "- HealthQ: Healthcare Conversation Chain", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- PRObot: Diabetic Retinopathy Assistant", "raw": "- PRObot: Diabetic Retinopathy Assistant", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Medical LLMs & Benchmarks:", "raw": "Medical LLMs & Benchmarks:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- MediQ: Clinical Reasoning Benchmark", "raw": "- MediQ: Clinical Reasoning Benchmark", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Touchstone: Segmentation Evaluation", "raw": "- Touchstone: Segmentation Evaluation", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Medical LLM Adaptation Progress", "raw": "- Medical LLM Adaptation Progress", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Fine-Tuning Medical QA Strategies", "raw": "- Fine-Tuning Medical QA Strategies", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, 
"label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "AI in Healthcare Ethics:", "raw": "AI in Healthcare Ethics:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Healthcare Robotics with LLMs", "raw": "- Healthcare Robotics with LLMs", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- XAI in Clinical Practice", "raw": "- XAI in Clinical Practice", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Precision Rehabilitation Framework", "raw": "- Precision Rehabilitation Framework", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Multimodal AI Challenges", "raw": "- Multimodal AI Challenges", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Now you can watch and listen to the latest Medical AI papers daily on our YouTube and Spotify channels as well!", "raw": "Now you can watch and listen to the latest Medical AI papers daily on our YouTube and Spotify channels as well!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Full Thread: ", "raw": "- Full Thread: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://x.com/OpenlifesciAI/status/1855207141302473090", "resource": null, "url": null, "href": "https://x.com/OpenlifesciAI/status/1855207141302473090", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- 
YouTube: ", "raw": "- YouTube: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://youtu.be/ad0uTnYuTo8", "resource": null, "url": null, "href": "https://youtu.be/ad0uTnYuTo8", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Spotify: ", "raw": "- Spotify: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://open.spotify.com/episode/6s39t1UJZk1i10szuXP2qN", "resource": null, "url": null, "href": "https://open.spotify.com/episode/6s39t1UJZk1i10szuXP2qN", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Last Week in Medical AI: Top Research Papers/Models ๐Ÿ”ฅ (November 2 -November 9, 2024) ๐Ÿ… Medical AI Paper of the Week: Exploring Large Language Models for Specialist-level Oncology Care Medical LLM & Other Models: - GSCo: Generalist-Specialist AI Collaboration - PediatricsGPT: Chinese Pediatric Assistant - MEG: Knowledge-Enhanced Medical QA - AutoProteinEngine: Multimodal Protein LLM Frameworks and Methodologies: - BrainSegFounder: 3D Neuroimage Analysis - PASSION: Sub-Saharan Dermatology Dataset - SAM for Lung X-ray Segmentation - Label Critic: Data-First Approach - Medprompt Runtime Strategies Medical LLM Applications: - CataractBot: Patient Support System - CheX-GPT: X-ray Report Enhancement - CardioAI: Cancer Cardiotoxicity Monitor - HealthQ: Healthcare Conversation Chain - PRObot: Diabetic Retinopathy Assistant Medical LLMs & Benchmarks: - MediQ: Clinical Reasoning Benchmark - Touchstone: Segmentation Evaluation - Medical LLM Adaptation Progress - Fine-Tuning Medical QA Strategies AI in Healthcare Ethics: - Healthcare Robotics with LLMs - XAI in Clinical Practice - Precision Rehabilitation Framework - Multimodal AI Challenges Now you can watch and listen to the latest Medical AI papers daily on our YouTube and Spotify channels as well! - Full Thread: https://x.com/OpenlifesciAI/status/1855207141302473090 - YouTube: https://youtu.be/ad0uTnYuTo8 - Spotify: https://open.spotify.com/episode/6s39t1UJZk1i10szuXP2qN
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f3fe13d79c1ba4c353d0c19/XswyGe3OtOdZ6g7rnrgfc.png", "fullname": "Aaditya Ura", "name": "aaditya", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 221, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/6vvSqKiMMtfw3xpLDmwwf.png" } ]
[]
[ { "reaction": "๐Ÿš€", "users": [ "aaditya", "Healthtensor", "John6666", "flflow", "BayesTensor", "RamBhakt007", "mesut07" ], "count": 7 }, { "reaction": "๐Ÿค—", "users": [ "aaditya", "BayesTensor", "Healthtensor", "introvoyz041", "mesut07" ], "count": 5 }, { "reaction": "๐Ÿ”ฅ", "users": [ "aaditya", "BayesTensor", "Healthtensor", "ai-everyday" ], "count": 4 }, { "reaction": "โค๏ธ", "users": [ "aaditya", "BayesTensor", "mesut07" ], "count": 3 }, { "reaction": "๐Ÿ‘", "users": [ "aaditya", "Latyrine" ], "count": 2 }, { "reaction": "๐Ÿง ", "users": [ "aaditya" ], "count": 1 } ]
2024-11-09T13:24:36.000Z
2024-11-09T13:24:36.185Z
[]
/posts/aaditya/602093405820721
3,092
0
957744935743333
[ { "type": "text", "value": "Good lord... Spent almost a day debugging this and it turns out it was an issue of gradio update incompatible with the new fastapi.", "raw": "Good lord... Spent almost a day debugging this and it turns out it was an issue of gradio update incompatible with the new fastapi.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://discuss.huggingface.co/t/huggingface-space-failed-after-working-initially/105514/8", "resource": null, "url": null, "href": "https://discuss.huggingface.co/t/huggingface-space-failed-after-working-initially/105514/8", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Finally got it back online! Come chat with your favorite anime characters here:", "raw": "Finally got it back online! Come chat with your favorite anime characters here:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/kz919/Persona-AI", "resource": { "type": "space", "id": "kz919/Persona-AI", "discussionNum": null }, "url": "https://huggingface.co/spaces/kz919/Persona-AI", "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Good lord... Spent almost a day debugging this and it turns out it was an issue of gradio update incompatible with the new fastapi. https://discuss.huggingface.co/t/huggingface-space-failed-after-working-initially/105514/8 Finally got it back online! Come chat with your favorite anime characters here: https://huggingface.co/spaces/kz919/Persona-AI
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62140dcdcf7928035e8135ad/FTiirwS_L6IaLHmHwIo2g.png", "fullname": "Kaizhao Liang", "name": "kz919", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 34, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "kz919", "Tonic" ], "count": 3 } ]
2024-09-06T04:48:47.000Z
2024-09-06T04:48:47.381Z
[]
/posts/kz919/957744935743333
636
0
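A minimal sketch of the kind of version check that makes the incompatibility described in the post above visible up front; the package names are real, but the pinned versions mentioned in the comment are placeholders rather than known-good values.

```python
# Print the installed gradio / fastapi versions before debugging a Space,
# so an incompatible pairing is obvious at a glance.
from importlib.metadata import version

for pkg in ("gradio", "fastapi"):
    print(pkg, version(pkg))

# If the pairing turns out to be broken, pinning both in requirements.txt
# (e.g. gradio==X.Y.Z and fastapi==X.Y.Z, with versions known to work together)
# keeps the Space from silently picking up an incompatible upgrade later.
```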
891741019531656
[ { "type": "text", "value": "Good folks at Epoch AI have just released their most comprehensive database yet, tracking over 800 state-of-the-art and historically notable AI models. This incredible resource provides key insights into the factors driving machine learning progress.", "raw": "Good folks at Epoch AI have just released their most comprehensive database yet, tracking over 800 state-of-the-art and historically notable AI models. This incredible resource provides key insights into the factors driving machine learning progress.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Since 2010, the training compute used to create AI models has been growing at a staggering rate of 4.1x per year. That means the computational power behind these models is doubling roughly every six months! And it's not just the compute that's increasing - the costs are too. Training compute costs for the largest models are doubling every nine months, with the most advanced models now costing hundreds of millions of dollars.", "raw": "Since 2010, the training compute used to create AI models has been growing at a staggering rate of 4.1x per year. That means the computational power behind these models is doubling roughly every six months! And it's not just the compute that's increasing - the costs are too. Training compute costs for the largest models are doubling every nine months, with the most advanced models now costing hundreds of millions of dollars.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Interestingly, training compute has scaled up faster for language models compared to vision. While the largest vision and language models had similar compute requirements before 2020, language models have since rapidly outpaced vision models, driven by the success of transformer architectures. The size of datasets used to train language models is also doubling approximately every eight months.", "raw": "Interestingly, training compute has scaled up faster for language models compared to vision. While the largest vision and language models had similar compute requirements before 2020, language models have since rapidly outpaced vision models, driven by the success of transformer architectures. 
The size of datasets used to train language models is also doubling approximately every eight months.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Another fascinating trend is that the length of time spent training notable models is growing by about 1.2x per year. While longer training times could ease hardware constraints, there is a tradeoff to consider. For very long runs, waiting for algorithmic and hardware improvements might be more beneficial than simply extending training.", "raw": "Another fascinating trend is that the length of time spent training notable models is growing by about 1.2x per year. While longer training times could ease hardware constraints, there is a tradeoff to consider. For very long runs, waiting for algorithmic and hardware improvements might be more beneficial than simply extending training.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "If this continues, by 2028, we will reach cluster prices in the 100 billion dollars, using 10GW of power!", "raw": "If this continues, by 2028, we will reach cluster prices in the 100 billion dollars, using 10GW of power!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Link: ", "raw": "Link: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://epochai.org/data/notable-ai-models", "resource": null, "url": null, "href": "https://epochai.org/data/notable-ai-models", "user": null, "lang": null, "code": null, "label": null } ]
Good folks at Epoch AI have just released their most comprehensive database yet, tracking over 800 state-of-the-art and historically notable AI models. This incredible resource provides key insights into the factors driving machine learning progress. Since 2010, the training compute used to create AI models has been growing at a staggering rate of 4.1x per year. That means the computational power behind these models is doubling roughly every six months! And it's not just the compute that's increasing - the costs are too. Training compute costs for the largest models are doubling every nine months, with the most advanced models now costing hundreds of millions of dollars. Interestingly, training compute has scaled up faster for language models compared to vision. While the largest vision and language models had similar compute requirements before 2020, language models have since rapidly outpaced vision models, driven by the success of transformer architectures. The size of datasets used to train language models is also doubling approximately every eight months. Another fascinating trend is that the length of time spent training notable models is growing by about 1.2x per year. While longer training times could ease hardware constraints, there is a tradeoff to consider. For very long runs, waiting for algorithmic and hardware improvements might be more beneficial than simply extending training. If this continues, by 2028, we will reach cluster prices in the 100 billion dollars, using 10GW of power! Link: https://epochai.org/data/notable-ai-models
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 197, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/RAemxwnzjGBe3kgNRfhrr.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "louisbrulenaudet" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "JRZ" ], "count": 1 } ]
2024-09-06T03:40:47.000Z
2024-09-06T03:40:47.733Z
[]
/posts/singhsidhukuldeep/891741019531656
772
0
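A quick arithmetic sanity check of the headline figure in the post above (a 4.1x yearly growth rate translating to a doubling roughly every six months); this is just the unit conversion, not a reproduction of Epoch AI's analysis.

```python
# Convert a 4.1x per-year growth rate into a doubling time in months.
import math

doublings_per_year = math.log2(4.1)            # ~2.04 doublings per year
months_per_doubling = 12 / doublings_per_year
print(f"{doublings_per_year:.2f} doublings/year -> "
      f"one doubling every {months_per_doubling:.1f} months")
# ~5.9 months, i.e. "roughly every six months" as stated above.
```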
928757596721302
[ { "type": "text", "value": "Decided to try to check how many weights in a 70b F32 model would be squashed when converted to F16 (spoiler, it's shockingly few)", "raw": "Decided to try to check how many weights in a 70b F32 model would be squashed when converted to F16 (spoiler, it's shockingly few)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "The reason for this comparison is that it should represent the same percentage of squishing as bf16 to fp16", "raw": "The reason for this comparison is that it should represent the same percentage of squishing as bf16 to fp16", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Had claude make me a script, using the new Reflection-70B, and these are the results:", "raw": "Had claude make me a script, using the new Reflection-70B, and these are the results:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Total weights: 70553706496", "raw": "Total weights: 70553706496", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Fully representable: 70530215524", "raw": "Fully representable: 70530215524", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Squashed: 23490972", "raw": "Squashed: 23490972", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Percentage squashed: 0.03%", "raw": "Percentage squashed: 0.03%", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, 
"label": null }, { "type": "text", "value": "0.03%!!!!", "raw": "0.03%!!!!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "A couple things to note, this uses a roundtrip of F32 -> F16 -> F32 and then torch.isclose to account for rounding errors that come up by the very nature of extremely accurate numbers, but it uses VERY small tolerances (rtol=1e-5, atol=1e-8)", "raw": "A couple things to note, this uses a roundtrip of F32 -> F16 -> F32 and then torch.isclose to account for rounding errors that come up by the very nature of extremely accurate numbers, but it uses VERY small tolerances (rtol=1e-5, atol=1e-8)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "This is also examining EVERY weight that was stored at F32, and for most layers I was somewhere between 0% and 0.03% of weights being squashed, no major outliers.", "raw": "This is also examining EVERY weight that was stored at F32, and for most layers I was somewhere between 0% and 0.03% of weights being squashed, no major outliers.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Overall, I feel even safer converting to F16 for llama.cpp, the extremely small number of weights that fall outside the range are likely so small that they don't actually play a role in the final output of the model at inference anyways.", "raw": "Overall, I feel even safer converting to F16 for llama.cpp, the extremely small number of weights that fall outside the range are likely so small that they don't actually play a role in the final output of the model at inference anyways.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Decided to try to check how many weights in a 70b F32 model would be squashed when converted to F16 (spoiler, it's shockingly few) The reason for this comparison is that it should represent the same percentage of squishing as bf16 to fp16 Had claude make me a script, using the new Reflection-70B, and these are the results: Total weights: 70553706496 Fully representable: 70530215524 Squashed: 23490972 Percentage squashed: 0.03% 0.03%!!!! A couple things to note, this uses a roundtrip of F32 -> F16 -> F32 and then torch.isclose to account for rounding errors that come up by the very nature of extremely accurate numbers, but it uses VERY small tolerances (rtol=1e-5, atol=1e-8) This is also examining EVERY weight that was stored at F32, and for most layers I was somewhere between 0% and 0.03% of weights being squashed, no major outliers. Overall, I feel even safer converting to F16 for llama.cpp, the extremely small number of weights that fall outside the range are likely so small that they don't actually play a role in the final output of the model at inference anyways.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6435718aaaef013d1aec3b8b/XKf-8MA47tjVAM6SCX0MP.jpeg", "fullname": "Bartowski", "name": "bartowski", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2735, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "prithivMLmods", "Sri-Vigneshwar-DJ", "Joseph717171", "not-lain", "John6666", "morph3v5" ], "count": 6 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Joseph717171" ], "count": 2 }, { "reaction": "๐Ÿคฏ", "users": [ "louisbrulenaudet", "Joseph717171" ], "count": 2 }, { "reaction": "โค๏ธ", "users": [ "Joseph717171", "KhaldiAbderrhmane" ], "count": 2 }, { "reaction": "๐Ÿค—", "users": [ "Joseph717171" ], "count": 1 }, { "reaction": "๐Ÿง ", "users": [ "Joseph717171" ], "count": 1 }, { "reaction": "๐Ÿš€", "users": [ "Joseph717171" ], "count": 1 }, { "reaction": "๐Ÿ‘", "users": [ "MoonRide" ], "count": 1 } ]
2024-09-05T21:49:55.000Z
2024-09-29T17:42:38.985Z
[ { "avatarUrl": "/avatars/da52e5fce67042332fa1e9f5fd3e5635.svg", "fullname": "Luke Chadwick", "name": "vertis", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6435718aaaef013d1aec3b8b/XKf-8MA47tjVAM6SCX0MP.jpeg", "fullname": "Bartowski", "name": "bartowski", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2735, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 384, "isFollowing": false }, { "avatarUrl": "/avatars/ea4398745974d781ae9dc0e95b12cabe.svg", "fullname": "Joseph", "name": "Joseph717171", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 22, "isFollowing": false }, { "avatarUrl": "/avatars/99351620d65d263418e6d0d4e170f055.svg", "fullname": "Abrosimov", "name": "ajiriro", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66d1efa935c36f266f507cff/a2-fPLeGwAp5fqCKdqfzp.jpeg", "fullname": "Harmendo", "name": "Hampetiudo", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/3b03217c22442b7bfed9beac2bf50d17.svg", "fullname": "Alex Daminger", "name": "Handgun1773", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "/avatars/98d7cbc7bf4cbf4f2810cbc0a1a34d64.svg", "fullname": "Iwan Kawrakow", "name": "ikawrakow", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 116, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/4Az8a8F60rNOD3L3ThsCe.png", "fullname": "Compilade", "name": "compilade", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/bartowski/928757596721302
16,096
20
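The script mentioned in the post above is not included, so the following is only a sketch of what such an F32 -> F16 -> F32 roundtrip check could look like, using the same tolerances quoted (rtol=1e-5, atol=1e-8); a real run would iterate over every tensor in the checkpoint shards rather than toy data.

```python
import torch

def count_squashed(weights: torch.Tensor, rtol: float = 1e-5, atol: float = 1e-8):
    """Count how many F32 values fail to survive an F16 roundtrip."""
    w32 = weights.float()
    roundtrip = w32.half().float()                      # F32 -> F16 -> F32
    ok = torch.isclose(roundtrip, w32, rtol=rtol, atol=atol)
    total = w32.numel()
    return total, total - ok.sum().item()

# Toy data that mimics the scenario in the post: weights that originated in bf16
# and were stored upcast to F32, so almost all of them fit exactly into F16.
t = torch.randn(1_000_000).to(torch.bfloat16).float()
total, squashed = count_squashed(t)
print(f"squashed {squashed}/{total} = {100 * squashed / total:.4f}%")
```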
938110381581989
[ { "type": "mention", "value": null, "raw": "@ehartford", "resource": null, "url": null, "href": null, "user": "ehartford", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/CohereForAI/c4ai-command-r-plus", "resource": { "type": "model", "id": "CohereForAI/c4ai-command-r-plus", "discussionNum": null }, "url": "https://huggingface.co/CohereForAI/c4ai-command-r-plus", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " dolphin when?", "raw": " dolphin when?", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
@ehartford https://huggingface.co/CohereForAI/c4ai-command-r-plus dolphin when?
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 138, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63111b2d88942700629f5771/u2a9y-yx6TG0N31OhMSHI.png", "fullname": "Eric Hartford", "name": "ehartford", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3261 } ]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "mahiatlinux", "danielus", "Locutusque", "den0620", "AtAndDev" ], "count": 6 } ]
2024-09-05T21:01:30.000Z
2024-09-05T21:01:30.000Z
[]
/posts/nroggendorff/938110381581989
1,036
0
697276772763075
[ { "type": "text", "value": "The ", "raw": "The ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`timm`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "timm", "label": null }, { "type": "text", "value": " leaderboard ", "raw": " leaderboard ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/timm/leaderboard", "resource": { "type": "space", "id": "timm/leaderboard", "discussionNum": null }, "url": "https://huggingface.co/spaces/timm/leaderboard", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " has been updated with the ability to select different hardware benchmark sets: RTX4090, RTX3090, two different CPUs along with some NCHW / NHWC layout and torch.compile (dynamo) variations. ", "raw": " has been updated with the ability to select different hardware benchmark sets: RTX4090, RTX3090, two different CPUs along with some NCHW / NHWC layout and torch.compile (dynamo) variations. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Also worth pointing out, there are three rather newish 'test' models that you'll see at the top of any samples/sec comparison:", "raw": "Also worth pointing out, there are three rather newish 'test' models that you'll see at the top of any samples/sec comparison:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "* test_vit (", "raw": "* test_vit (", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/timm/test_vit.r160_in1k", "resource": { "type": "model", "id": "timm/test_vit.r160_in1k", "discussionNum": null }, "url": "https://huggingface.co/timm/test_vit.r160_in1k", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": ")", "raw": ")", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "* test_efficientnet (", "raw": "* test_efficientnet (", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/timm/test_efficientnet.r160_in1k", "resource": { "type": "model", "id": "timm/test_efficientnet.r160_in1k", "discussionNum": null }, "url": "https://huggingface.co/timm/test_efficientnet.r160_in1k", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": ")", "raw": ")", "resource": null, 
"url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "* test_byobnet (", "raw": "* test_byobnet (", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/timm/test_byobnet.r160_in1k", "resource": { "type": "model", "id": "timm/test_byobnet.r160_in1k", "discussionNum": null }, "url": "https://huggingface.co/timm/test_byobnet.r160_in1k", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": ", a mix of resnet, darknet, effnet/regnet like blocks)", "raw": ", a mix of resnet, darknet, effnet/regnet like blocks)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "They are < 0.5M params, insanely fast and originally intended for unit testing w/ real weights. They have awful ImageNet top-1, it's rare to have anyone bother to train a model this small on ImageNet (the classifier is roughly 30-70% of the param count!). However, they are FAST on very limited hadware and you can fine-tune them well on small data. Could be the model you're looking for?", "raw": "They are < 0.5M params, insanely fast and originally intended for unit testing w/ real weights. They have awful ImageNet top-1, it's rare to have anyone bother to train a model this small on ImageNet (the classifier is roughly 30-70% of the param count!). However, they are FAST on very limited hadware and you can fine-tune them well on small data. Could be the model you're looking for?", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
The `timm` leaderboard https://huggingface.co/spaces/timm/leaderboard has been updated with the ability to select different hardware benchmark sets: RTX4090, RTX3090, two different CPUs along with some NCHW / NHWC layout and torch.compile (dynamo) variations. Also worth pointing out, there are three rather newish 'test' models that you'll see at the top of any samples/sec comparison: * test_vit (https://huggingface.co/timm/test_vit.r160_in1k) * test_efficientnet (https://huggingface.co/timm/test_efficientnet.r160_in1k) * test_byobnet (https://huggingface.co/timm/test_byobnet.r160_in1k, a mix of resnet, darknet, effnet/regnet like blocks) They are < 0.5M params, insanely fast and originally intended for unit testing w/ real weights. They have awful ImageNet top-1, it's rare to have anyone bother to train a model this small on ImageNet (the classifier is roughly 30-70% of the param count!). However, they are FAST on very limited hardware and you can fine-tune them well on small data. Could be the model you're looking for?
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1667002643224-604a5184dca2c7ac7508b849.jpeg", "fullname": "Ross Wightman", "name": "rwightman", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 214, "isFollowing": false }
[]
[]
[ { "reaction": "โค๏ธ", "users": [ "clem", "MohamedRashad", "John6666", "bryant1410" ], "count": 4 }, { "reaction": "๐Ÿ”ฅ", "users": [ "de-Rodrigo" ], "count": 1 }, { "reaction": "๐Ÿ‘", "users": [ "maxiw" ], "count": 1 } ]
2024-09-05T18:49:22.000Z
2024-09-05T18:57:05.588Z
[]
/posts/rwightman/697276772763075
1,273
0
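As a rough illustration of how small the 'test' models from the post above are in practice, here is a sketch of loading one with timm; it assumes a recent timm release where these models and their pretrained weights are registered, and the 160x160 input size is inferred from the r160 tag.

```python
import timm
import torch

model = timm.create_model("test_vit.r160_in1k", pretrained=True)
model.eval()

n_params = sum(p.numel() for p in model.parameters())
print(f"params: {n_params / 1e6:.2f}M")            # well under 0.5M

with torch.no_grad():
    logits = model(torch.randn(1, 3, 160, 160))    # r160 models use 160x160 inputs
print(logits.shape)                                # torch.Size([1, 1000])
```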
247019069617685
[ { "type": "text", "value": "I have put together a notebook on Multimodal RAG, where we do not process the documents with hefty pipelines but natively use:", "raw": "I have put together a notebook on Multimodal RAG, where we do not process the documents with hefty pipelines but natively use:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- ", "raw": "- ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/vidore/colpali", "resource": { "type": "model", "id": "vidore/colpali", "discussionNum": null }, "url": "https://huggingface.co/vidore/colpali", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " for retrieval ๐Ÿ“– it doesn't need indexing with image-text pairs but just images!", "raw": " for retrieval ๐Ÿ“– it doesn't need indexing with image-text pairs but just images!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- ", "raw": "- ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct", "resource": { "type": "model", "id": "Qwen/Qwen2-VL-2B-Instruct", "discussionNum": null }, "url": "https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " for generation ๐Ÿ’ฌ directly feed images as is to a vision language model with no processing to text! ", "raw": " for generation ๐Ÿ’ฌ directly feed images as is to a vision language model with no processing to text! 
", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I used ColPali implementation of the new ๐Ÿญ Byaldi library by ", "raw": "I used ColPali implementation of the new ๐Ÿญ Byaldi library by ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@bclavie", "resource": null, "url": null, "href": null, "user": "bclavie", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ๐Ÿค—", "raw": " ๐Ÿค—", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/answerdotai/byaldi", "resource": null, "url": null, "href": "https://github.com/answerdotai/byaldi", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Link to notebook: ", "raw": "Link to notebook: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/merveenoyan/smol-vision/blob/main/ColPali_%2B_Qwen2_VL.ipynb", "resource": null, "url": null, "href": "https://github.com/merveenoyan/smol-vision/blob/main/ColPali_%2B_Qwen2_VL.ipynb", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
I have put together a notebook on Multimodal RAG, where we do not process the documents with hefty pipelines but natively use: - https://huggingface.co/vidore/colpali for retrieval ๐Ÿ“– it doesn't need indexing with image-text pairs but just images! - https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct for generation ๐Ÿ’ฌ directly feed images as is to a vision language model with no processing to text! I used ColPali implementation of the new ๐Ÿญ Byaldi library by @bclavie ๐Ÿค— https://github.com/answerdotai/byaldi Link to notebook: https://github.com/merveenoyan/smol-vision/blob/main/ColPali_%2B_Qwen2_VL.ipynb
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5520, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5ff60d4352c26e9bc240badd/HzoknJibrSasc1ZzU71XA.png", "fullname": "Benjamin Claviรฉ", "name": "bclavie", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 28 } ]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "rwightman", "umair894", "clem", "louisbrulenaudet", "s3nh", "John6666", "BasitMustafa", "Johnyquest7", "phanhoang", "resbyte", "denizaybey", "allandclive", "fdaudens", "vilarin", "ak0601", "rreed-pha", "xi0v", "Rajaram1996", "Rayvee", "oceansweep", "parjun", "byteprobe", "Filippo" ], "count": 23 }, { "reaction": "๐Ÿ‘", "users": [ "hitchhiker3010", "Csplk", "sasikiran", "fsommers", "rogermt", "navin7", "sambarnett96", "oceansweep", "ysdede", "shreyamondal" ], "count": 10 }, { "reaction": "โค๏ธ", "users": [ "rreed-pha", "oceansweep", "Yassmen", "madstuntman11" ], "count": 4 } ]
2024-09-05T17:10:03.000Z
2024-09-05T17:10:03.412Z
[]
/posts/merve/247019069617685
5,497
0
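The notebook linked above is the authoritative reference; as a rough sketch of the flow it describes (retrieve pages with ColPali via Byaldi, then pass the retrieved page image straight to Qwen2-VL), something like the following. Method, argument, and attribute names here are assumptions based on the Byaldi and Transformers APIs and may differ slightly between versions, and the retrieved-page path is a placeholder.

```python
from byaldi import RAGMultiModalModel
from PIL import Image
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

# 1) Retrieval: index a folder of documents directly as images -- no OCR/text pipeline.
retriever = RAGMultiModalModel.from_pretrained("vidore/colpali")
retriever.index(input_path="docs/", index_name="demo_index", overwrite=True)

query = "What is the revenue trend shown in the chart?"
results = retriever.search(query, k=1)
print(results[0])  # identifies the best-matching document/page for the query

# 2) Generation: feed the retrieved page image as-is to Qwen2-VL.
page = Image.open("docs/best_matching_page.png")  # placeholder path for the hit above

model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-2B-Instruct", torch_dtype="auto", device_map="auto"
)
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")

messages = [{"role": "user",
             "content": [{"type": "image"}, {"type": "text", "text": query}]}]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=[prompt], images=[page], return_tensors="pt").to(model.device)

out = model.generate(**inputs, max_new_tokens=128)
answer = processor.batch_decode(out[:, inputs["input_ids"].shape[1]:],
                                skip_special_tokens=True)[0]
print(answer)
```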
875308955939620
[ { "type": "text", "value": "How do i access llama 3.1 70b in my space ?", "raw": "How do i access llama 3.1 70b in my space ?", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "this doesn't seem to work, can someone help me with a working code ", "raw": "this doesn't seem to work, can someone help me with a working code ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "from transformers import AutoConfig", "raw": "from transformers import AutoConfig", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "config = AutoConfig.from_pretrained(\"meta-llama/Meta-Llama-3.1-70B\", revision=\"main\")", "raw": "config = AutoConfig.from_pretrained(\"meta-llama/Meta-Llama-3.1-70B\", revision=\"main\")", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "config.rope_scaling = {\"type\": \"llama3\", \"factor\": 8.0}", "raw": "config.rope_scaling = {\"type\": \"llama3\", \"factor\": 8.0}", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "model = AutoModelForCausalLM.from_pretrained(\"meta-llama/Meta-Llama-3.1-70B\", config=config, use_auth_token=True)", "raw": "model = AutoModelForCausalLM.from_pretrained(\"meta-llama/Meta-Llama-3.1-70B\", config=config, use_auth_token=True)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
How do i access llama 3.1 70b in my space ? this doesn't seem to work, can someone help me with a working code from transformers import AutoConfig config = AutoConfig.from_pretrained("meta-llama/Meta-Llama-3.1-70B", revision="main") config.rope_scaling = {"type": "llama3", "factor": 8.0} model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3.1-70B", config=config, use_auth_token=True)
{ "avatarUrl": "/avatars/fcf9eac61e0ec82ba5503bf07c867247.svg", "fullname": "Rangaiah", "name": "Shamurangaiah", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-05T16:50:57.000Z
2024-09-06T13:20:27.221Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 384, "isFollowing": false }, { "avatarUrl": "/avatars/fcf9eac61e0ec82ba5503bf07c867247.svg", "fullname": "Rangaiah", "name": "Shamurangaiah", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/Shamurangaiah/875308955939620
359
11
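For context on the question above: the snippet in the post imports only AutoConfig but then calls AutoModelForCausalLM, and overrides rope_scaling by hand. Below is a hedged sketch of a more conventional way to load the gated model, assuming access has been granted and a recent transformers release that understands Llama 3.1's rope scaling natively (which usually removes the need for any manual override); this is not confirmed by the post itself.

```python
# Sketch only: load Llama 3.1 70B without overriding rope_scaling by hand.
# Assumes `pip install -U transformers accelerate` and a stored HF token with
# access to the gated meta-llama repo; a 70B model also needs multiple large
# GPUs or offloading, which this sketch does not address.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Meta-Llama-3.1-70B"
tok = AutoTokenizer.from_pretrained(model_id, token=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",   # requires accelerate
    token=True,          # replaces the deprecated use_auth_token argument
)

inputs = tok("The capital of France is", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=20)
print(tok.decode(out[0], skip_special_tokens=True))
```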
834561196751118
[ { "type": "text", "value": "๐Ÿš€ย ๐—ช๐—ต๐—ฒ๐—ฟ๐—ฒ ๐˜€๐—ฐ๐—ฎ๐—น๐—ถ๐—ป๐—ด ๐—น๐—ฎ๐˜„๐˜€ ๐—ฎ๐—ฟ๐—ฒ ๐˜๐—ฎ๐—ธ๐—ถ๐—ป๐—ด ๐˜‚๐˜€ : ๐—ฏ๐˜† ๐Ÿฎ๐Ÿฌ๐Ÿฎ๐Ÿด, ๐—”๐—œ ๐—–๐—น๐˜‚๐˜€๐˜๐—ฒ๐—ฟ๐˜€ ๐˜„๐—ถ๐—น๐—น ๐—ฟ๐—ฒ๐—ฎ๐—ฐ๐—ต ๐˜๐—ต๐—ฒ ๐—ฝ๐—ผ๐˜„๐—ฒ๐—ฟ ๐—ฐ๐—ผ๐—ป๐˜€๐˜‚๐—บ๐—ฝ๐˜๐—ถ๐—ผ๐—ป ๐—ผ๐—ณ ๐—ฒ๐—ป๐˜๐—ถ๐—ฟ๐—ฒ ๐—ฐ๐—ผ๐˜‚๐—ป๐˜๐—ฟ๐—ถ๐—ฒ๐˜€", "raw": "๐Ÿš€ย ๐—ช๐—ต๐—ฒ๐—ฟ๐—ฒ ๐˜€๐—ฐ๐—ฎ๐—น๐—ถ๐—ป๐—ด ๐—น๐—ฎ๐˜„๐˜€ ๐—ฎ๐—ฟ๐—ฒ ๐˜๐—ฎ๐—ธ๐—ถ๐—ป๐—ด ๐˜‚๐˜€ : ๐—ฏ๐˜† ๐Ÿฎ๐Ÿฌ๐Ÿฎ๐Ÿด, ๐—”๐—œ ๐—–๐—น๐˜‚๐˜€๐˜๐—ฒ๐—ฟ๐˜€ ๐˜„๐—ถ๐—น๐—น ๐—ฟ๐—ฒ๐—ฎ๐—ฐ๐—ต ๐˜๐—ต๐—ฒ ๐—ฝ๐—ผ๐˜„๐—ฒ๐—ฟ ๐—ฐ๐—ผ๐—ป๐˜€๐˜‚๐—บ๐—ฝ๐˜๐—ถ๐—ผ๐—ป ๐—ผ๐—ณ ๐—ฒ๐—ป๐˜๐—ถ๐—ฟ๐—ฒ ๐—ฐ๐—ผ๐˜‚๐—ป๐˜๐—ฟ๐—ถ๐—ฒ๐˜€", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Reminder : โ€œScaling lawsโ€ are empirical laws saying that if you keep multiplying your compute by x10, your models will mechanically keep getting better and better.", "raw": "Reminder : โ€œScaling lawsโ€ are empirical laws saying that if you keep multiplying your compute by x10, your models will mechanically keep getting better and better.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "To give you an idea, GPT-3 can barely write sentences, and GPT-4, which only used x15 its amount of compute, already sounds much smarter than some of my friends (although it's not really - or at least I haven't tested them side-by side). So you can imagine how far a x100 over GPT-4 can take us.", "raw": "To give you an idea, GPT-3 can barely write sentences, and GPT-4, which only used x15 its amount of compute, already sounds much smarter than some of my friends (although it's not really - or at least I haven't tested them side-by side). 
So you can imagine how far a x100 over GPT-4 can take us.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸŽ๏ธย As a result, tech titans are racing to build the biggest models, and for this they need gigantic training clusters.", "raw": "๐ŸŽ๏ธย As a result, tech titans are racing to build the biggest models, and for this they need gigantic training clusters.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "The picture below shows the growth of training compute: it is increasing at a steady exponential rate of a x10 every 2 years. So letโ€™s take this progress a bit further:", "raw": "The picture below shows the growth of training compute: it is increasing at a steady exponential rate of a x10 every 2 years. So letโ€™s take this progress a bit further:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- 2022: starting training for GPT-4 : 10^26 FLOPs, cost of $100M", "raw": "- 2022: starting training for GPT-4 : 10^26 FLOPs, cost of $100M", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- 2024: today, companies start training on much larger clusters like the โ€œsuper AI clusterโ€ of Elon Muskโ€™s xAI, 10^27 FLOPS, $1B", "raw": "- 2024: today, companies start training on much larger clusters like the โ€œsuper AI clusterโ€ of Elon Muskโ€™s xAI, 10^27 FLOPS, $1B", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- 2026 : by then clusters will require 1GW, i.e. around the full power generated by a nuclear reactor", "raw": "- 2026 : by then clusters will require 1GW, i.e. around the full power generated by a nuclear reactor", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- 2028: we reach cluster prices in the 100 billion dollars, using 10GW, more than the most powerful power stations currently in use in the US. 
This last size seems crazy, but Microsoft and OpenAI already are planning one.", "raw": "- 2028: we reach cluster prices in the 100 billion dollars, using 10GW, more than the most powerful power stations currently in use in the US. This last size seems crazy, but Microsoft and OpenAI already are planning one.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Will AI clusters effectively reach these crazy sizes where the consume as much as entire countries? ", "raw": "Will AI clusters effectively reach these crazy sizes where the consume as much as entire countries? ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โžก๏ธย Three key ingredients of training might be a roadblock to scaling up :", "raw": "โžก๏ธย Three key ingredients of training might be a roadblock to scaling up :", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ’ธย Money: but itโ€™s very unlikely, given the potential market size for AGI, that investors lose interest.", "raw": "๐Ÿ’ธย Money: but itโ€™s very unlikely, given the potential market size for AGI, that investors lose interest.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โšก๏ธ Energy supply at a specific location", "raw": "โšก๏ธ Energy supply at a specific location", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ“šย Training data: weโ€™re already using 15 trillion tokens for Llama-3.1 when Internet has something like 60 trillion.", "raw": "๐Ÿ“šย Training data: weโ€™re already using 15 trillion tokens for Llama-3.1 when Internet has something like 60 trillion.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿค”ย Iโ€™d be curious to hear your thoughts: do you think weโ€™ll race all the way there?", "raw": "๐Ÿค”ย Iโ€™d be curious to hear your thoughts: do you think weโ€™ll race all the way there?", "resource": null, "url": null, "href": null, 
"user": null, "lang": null, "code": null, "label": null } ]
๐Ÿš€ย ๐—ช๐—ต๐—ฒ๐—ฟ๐—ฒ ๐˜€๐—ฐ๐—ฎ๐—น๐—ถ๐—ป๐—ด ๐—น๐—ฎ๐˜„๐˜€ ๐—ฎ๐—ฟ๐—ฒ ๐˜๐—ฎ๐—ธ๐—ถ๐—ป๐—ด ๐˜‚๐˜€ : ๐—ฏ๐˜† ๐Ÿฎ๐Ÿฌ๐Ÿฎ๐Ÿด, ๐—”๐—œ ๐—–๐—น๐˜‚๐˜€๐˜๐—ฒ๐—ฟ๐˜€ ๐˜„๐—ถ๐—น๐—น ๐—ฟ๐—ฒ๐—ฎ๐—ฐ๐—ต ๐˜๐—ต๐—ฒ ๐—ฝ๐—ผ๐˜„๐—ฒ๐—ฟ ๐—ฐ๐—ผ๐—ป๐˜€๐˜‚๐—บ๐—ฝ๐˜๐—ถ๐—ผ๐—ป ๐—ผ๐—ณ ๐—ฒ๐—ป๐˜๐—ถ๐—ฟ๐—ฒ ๐—ฐ๐—ผ๐˜‚๐—ป๐˜๐—ฟ๐—ถ๐—ฒ๐˜€ Reminder: โ€œScaling lawsโ€ are empirical laws saying that if you keep multiplying your compute by x10, your models will mechanically keep getting better and better. To give you an idea, GPT-3 can barely write sentences, and GPT-4, which used only x15 the compute, already sounds much smarter than some of my friends (although it's not really - or at least I haven't tested them side-by-side). So you can imagine how far x100 over GPT-4 can take us. ๐ŸŽ๏ธย As a result, tech titans are racing to build the biggest models, and for this they need gigantic training clusters. The picture below shows the growth of training compute: it is increasing at a steady exponential rate of x10 every 2 years. So letโ€™s take this progress a bit further: - 2022: training starts for GPT-4: 10^26 FLOPs, cost of $100M - 2024: today, companies start training on much larger clusters like the โ€œsuper AI clusterโ€ of Elon Muskโ€™s xAI: 10^27 FLOPs, $1B - 2026: by then, clusters will require 1GW, i.e. around the full power generated by a nuclear reactor - 2028: we reach cluster prices around $100 billion, using 10GW, more than the most powerful power stations currently in use in the US. This last size seems crazy, but Microsoft and OpenAI are already planning one. Will AI clusters effectively reach these crazy sizes where they consume as much as entire countries? โžก๏ธย Three key ingredients of training might become roadblocks to scaling up: ๐Ÿ’ธย Money: but itโ€™s very unlikely, given the potential market size for AGI, that investors will lose interest. โšก๏ธ Energy supply at a specific location ๐Ÿ“šย Training data: weโ€™re already using 15 trillion tokens for Llama-3.1 when the Internet has something like 60 trillion. ๐Ÿค”ย Iโ€™d be curious to hear your thoughts: do you think weโ€™ll race all the way there?
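A quick back-of-the-envelope sketch of that extrapolation, in Python. It only reuses the numbers quoted in the post (10^26 FLOPs and $100M in 2022, x10 every 2 years) and assumes cost grows roughly in proportion to compute, which is a simplification rather than a claim from the post.
```
# Back-of-the-envelope extrapolation of the x10-every-2-years trend described above.
# Starting point (2022: 1e26 FLOPs, $100M) is taken from the post; assuming cost
# grows roughly in proportion to compute, which is a simplification.
start_year, start_flops, start_cost = 2022, 1e26, 100e6
for year in (2022, 2024, 2026, 2028):
    factor = 10 ** ((year - start_year) / 2)
    print(f"{year}: ~{start_flops * factor:.0e} FLOPs, ~${start_cost * factor / 1e9:.1f}B")
```
Running this reproduces the milestones cited above: ~1e27 FLOPs and ~$1B in 2024, ~1e29 FLOPs and ~$100B in 2028.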
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 476, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/SeVb6BylGnaZ-BAubraaw.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Kaoeiri" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "lamhieu", "Kaoeiri" ], "count": 2 } ]
2024-09-05T14:18:20.000Z
2024-09-06T14:05:45.366Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/600ae38cc92b79f54efd4556/cSqRIslYl5L3I4WK3a31f.png", "fullname": "Hieu Lam", "name": "lamhieu", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 74, "isFollowing": false }, { "avatarUrl": "/avatars/ac25f29292cca71ab6d509ea781e7943.svg", "fullname": "Shareef Taylor", "name": "MANOFAi94", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662162fd296b3d40f15367a4/jM74dtHuAGI6UlLGT7A9s.jpeg", "fullname": "Stephen Genusa", "name": "StephenGenusa", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/m-ric/834561196751118
842
3
935467526386612
[ { "type": "text", "value": "๐ŸŒ Introducing PPT Online Dataset - ", "raw": "๐ŸŒ Introducing PPT Online Dataset - ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/nyuuzyou/pptonline", "resource": { "type": "dataset", "id": "nyuuzyou/pptonline", "discussionNum": null }, "url": "https://huggingface.co/datasets/nyuuzyou/pptonline", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Dataset highlights:", "raw": "Dataset highlights:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Metadata for 1,418,349 PowerPoint (.ppt) files from ppt-online.org", "raw": "- Metadata for 1,418,349 PowerPoint (.ppt) files from ppt-online.org", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Multilingual content: Russian, Ukrainian, Belarusian, Kazakh, English, and others", "raw": "- Multilingual content: Russian, Ukrainian, Belarusian, Kazakh, English, and others", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Each entry includes: Unique ID, title, category, download link, file size, and content snippet", "raw": "- Each entry includes: Unique ID, title, category, download link, file size, and content snippet", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Data reflects presentations accessible through the PPT Online platform", "raw": "- Data reflects presentations accessible through the PPT Online platform", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Licensed under Creative Commons Zero (CC0) for unrestricted use", "raw": "- Licensed under Creative Commons Zero (CC0) for unrestricted use", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, 
"user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "This dataset offers a unique window into online educational resources, particularly in Eastern European and Central Asian contexts. It provides opportunities for analyzing presentation trends, topic distributions, and language patterns in educational materials.", "raw": "This dataset offers a unique window into online educational resources, particularly in Eastern European and Central Asian contexts. It provides opportunities for analyzing presentation trends, topic distributions, and language patterns in educational materials.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐ŸŒ Introducing PPT Online Dataset - https://huggingface.co/datasets/nyuuzyou/pptonline Dataset highlights: - Metadata for 1,418,349 PowerPoint (.ppt) files from ppt-online.org - Multilingual content: Russian, Ukrainian, Belarusian, Kazakh, English, and others - Each entry includes: Unique ID, title, category, download link, file size, and content snippet - Data reflects presentations accessible through the PPT Online platform - Licensed under Creative Commons Zero (CC0) for unrestricted use This dataset offers a unique window into online educational resources, particularly in Eastern European and Central Asian contexts. It provides opportunities for analyzing presentation trends, topic distributions, and language patterns in educational materials.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png", "fullname": "nyuuzyou", "name": "nyuuzyou", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 58, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "benjamin-paine", "RazNT" ], "count": 3 } ]
2024-09-05T12:15:28.000Z
2024-09-05T12:15:28.806Z
[]
/posts/nyuuzyou/935467526386612
797
0
148486966241479
[ { "type": "text", "value": "Hey everyone ๐Ÿค—!", "raw": "Hey everyone ๐Ÿค—!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "We (finegrain) have created some custom ComfyUI nodes to use our refiners micro-framework inside comfy! ๐ŸŽ‰", "raw": "We (finegrain) have created some custom ComfyUI nodes to use our refiners micro-framework inside comfy! ๐ŸŽ‰", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "We only support our new Box Segmenter at the moment, but we're thinking of adding more nodes since there seems to be a demand for it. We leverage the new (beta) Comfy Registry to host our nodes. They are available at: ", "raw": "We only support our new Box Segmenter at the moment, but we're thinking of adding more nodes since there seems to be a demand for it. We leverage the new (beta) Comfy Registry to host our nodes. They are available at: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://registry.comfy.org/publishers/finegrain/nodes/comfyui-refiners", "resource": null, "url": null, "href": "https://registry.comfy.org/publishers/finegrain/nodes/comfyui-refiners", "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": ". You can install them by running:", "raw": ". 
You can install them by running:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "code_fence", "value": null, "raw": "```\ncomfy node registry-install comfyui-refiners\n```", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "comfy node registry-install comfyui-refiners", "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Or by unzipping the archive you can download by clicking \"Download Latest\" into your ", "raw": "Or by unzipping the archive you can download by clicking \"Download Latest\" into your ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`custom_nodes`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "custom_nodes", "label": null }, { "type": "text", "value": " comfy folder.", "raw": " comfy folder.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "We are eager to hear your feedbacks and suggestions for new nodes and how you'll use them! ๐Ÿ™", "raw": "We are eager to hear your feedbacks and suggestions for new nodes and how you'll use them! ๐Ÿ™", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Hey everyone ๐Ÿค—! We (finegrain) have created some custom ComfyUI nodes to use our refiners micro-framework inside comfy! ๐ŸŽ‰ We only support our new Box Segmenter at the moment, but we're thinking of adding more nodes since there seems to be a demand for it. We leverage the new (beta) Comfy Registry to host our nodes. They are available at: https://registry.comfy.org/publishers/finegrain/nodes/comfyui-refiners. You can install them by running:
```
comfy node registry-install comfyui-refiners
```
Or by unzipping the archive (downloadable by clicking "Download Latest") into your `custom_nodes` comfy folder. We are eager to hear your feedback and suggestions for new nodes and how you'll use them! ๐Ÿ™
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669043420538-6364f1784f773b7e4cede70c.jpeg", "fullname": "Laureฮทt Fainsin", "name": "1aurent", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 79, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "limiteinductive", "John6666", "lunarflu", "djuna", "catwell" ], "count": 5 } ]
2024-09-05T11:48:01.000Z
2024-09-07T10:40:15.861Z
[]
/posts/1aurent/148486966241479
1,059
0
436311113936516
[ { "type": "text", "value": "๐Ÿ™‹๐Ÿปโ€โ™‚๏ธHey there folks,", "raw": "๐Ÿ™‹๐Ÿปโ€โ™‚๏ธHey there folks,", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Did you see the new coding model from ", "raw": "Did you see the new coding model from ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@01-ai", "resource": null, "url": null, "href": null, "user": "01-ai", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ? ", "raw": " ? ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "collection : ", "raw": "collection : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/01-ai/yi-coder-66bdb00f5bdd611f9a008f30", "resource": { "type": "collection", "id": "01-ai/yi-coder-66bdb00f5bdd611f9a008f30", "discussionNum": null }, "url": "https://huggingface.co/collections/01-ai/yi-coder-66bdb00f5bdd611f9a008f30", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "demo : ", "raw": "demo : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Tonic/Yi-Coder-9B", "resource": { "type": "space", "id": "Tonic/Yi-Coder-9B", "discussionNum": null }, "url": "https://huggingface.co/spaces/Tonic/Yi-Coder-9B", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "achieves SOTA on benchmarks , 125K context window , 55 languages including Docker, Js and many more ๐Ÿš€", "raw": "achieves SOTA on benchmarks , 125K context window , 55 languages including Docker, Js and many more ๐Ÿš€", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿ™‹๐Ÿปโ€โ™‚๏ธ Hey there folks, Did you see the new coding model from @01-ai? collection: https://huggingface.co/collections/01-ai/yi-coder-66bdb00f5bdd611f9a008f30 demo: https://huggingface.co/spaces/Tonic/Yi-Coder-9B achieves SOTA on benchmarks, 125K context window, 55 languages including Docker, JS and many more ๐Ÿš€
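For anyone who wants to try it outside the demo Space, a minimal transformers sketch could look like the following; the exact checkpoint id is an assumption based on the collection above, so adjust it to whichever Yi-Coder variant you pick.
```
# Minimal sketch for trying a Yi-Coder chat model with transformers.
# The checkpoint id is an assumption taken from the collection linked above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "01-ai/Yi-Coder-9B-Chat"  # assumed id, adjust to the variant you want
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

messages = [{"role": "user", "content": "Write a Dockerfile for a small FastAPI app."}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
output = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```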
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 310, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿš€", "users": [ "web3builder", "John6666", "louisbrulenaudet", "djuna", "KingNish" ], "count": 5 } ]
2024-09-05T09:56:55.000Z
2024-09-05T13:32:54.611Z
[ { "avatarUrl": "/avatars/1280748c5a2e24a8f00618b544c9749a.svg", "fullname": "leuneli", "name": "leuneli", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/Tonic/436311113936516
1,089
1
995511131459162
[ { "type": "text", "value": "If you have documents that do not only have text and you're doing retrieval or RAG (using OCR and LLMs), give it up and give ColPali and vision language models a try ๐Ÿค—", "raw": "If you have documents that do not only have text and you're doing retrieval or RAG (using OCR and LLMs), give it up and give ColPali and vision language models a try ๐Ÿค—", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Why? Documents consist of multiple modalities: layout, table, text, chart, images. Document processing pipelines often consist of multiple models and they're immensely brittle and slow. ๐Ÿฅฒ", "raw": "Why? Documents consist of multiple modalities: layout, table, text, chart, images. Document processing pipelines often consist of multiple models and they're immensely brittle and slow. ๐Ÿฅฒ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "How? ColPali is a ColBERT-like document retrieval model built on PaliGemma, it operates over image patches directly, and indexing takes far less time with more accuracy. You can use it for retrieval, and if you want to do retrieval augmented generation, find the closest document, and do not process it, give it directly to a VLM like Qwen2-VL (as image input) and give your text query. ๐Ÿค", "raw": "How? ColPali is a ColBERT-like document retrieval model built on PaliGemma, it operates over image patches directly, and indexing takes far less time with more accuracy. You can use it for retrieval, and if you want to do retrieval augmented generation, find the closest document, and do not process it, give it directly to a VLM like Qwen2-VL (as image input) and give your text query. ๐Ÿค", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "This is much faster + you do not lose out on any information + much easier to maintain too! ๐Ÿฅณ", "raw": "This is much faster + you do not lose out on any information + much easier to maintain too! 
๐Ÿฅณ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Multimodal RAG ", "raw": "Multimodal RAG ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/merve/multimodal-rag-66d97602e781122aae0a5139", "resource": { "type": "collection", "id": "merve/multimodal-rag-66d97602e781122aae0a5139", "discussionNum": null }, "url": "https://huggingface.co/collections/merve/multimodal-rag-66d97602e781122aae0a5139", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " ๐Ÿ’ฌ", "raw": " ๐Ÿ’ฌ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Document AI (made it way before, for folks who want structured input/output and can fine-tune a model) ", "raw": "Document AI (made it way before, for folks who want structured input/output and can fine-tune a model) ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/merve/awesome-document-ai-65ef1cdc2e97ef9cc85c898e", "resource": { "type": "collection", "id": "merve/awesome-document-ai-65ef1cdc2e97ef9cc85c898e", "discussionNum": null }, "url": "https://huggingface.co/collections/merve/awesome-document-ai-65ef1cdc2e97ef9cc85c898e", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " ๐Ÿ“–", "raw": " ๐Ÿ“–", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
If you have documents that do not only have text and you're doing retrieval or RAG (using OCR and LLMs), give it up and give ColPali and vision language models a try ๐Ÿค— Why? Documents consist of multiple modalities: layout, table, text, chart, images. Document processing pipelines often consist of multiple models and they're immensely brittle and slow. ๐Ÿฅฒ How? ColPali is a ColBERT-like document retrieval model built on PaliGemma, it operates over image patches directly, and indexing takes far less time with more accuracy. You can use it for retrieval, and if you want to do retrieval augmented generation, find the closest document, and do not process it, give it directly to a VLM like Qwen2-VL (as image input) and give your text query. ๐Ÿค This is much faster + you do not lose out on any information + much easier to maintain too! ๐Ÿฅณ Multimodal RAG https://huggingface.co/collections/merve/multimodal-rag-66d97602e781122aae0a5139 ๐Ÿ’ฌ Document AI (made it way before, for folks who want structured input/output and can fine-tune a model) https://huggingface.co/collections/merve/awesome-document-ai-65ef1cdc2e97ef9cc85c898e ๐Ÿ“–
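A rough sketch of the generation half of that pipeline: take the page image your ColPali-style retriever ranks first and hand it to Qwen2-VL together with the text query. `retrieve_best_page` is a hypothetical stand-in for the ColPali index lookup, and the checkpoint id and processor calls follow the usual transformers pattern but should be double-checked against the model docs.
```
# Sketch of "retrieve a page image, then ask a VLM about it".
# `retrieve_best_page` is a hypothetical helper standing in for a ColPali index
# lookup; the Qwen2-VL checkpoint id and processor usage are assumptions to verify.
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

model_id = "Qwen/Qwen2-VL-7B-Instruct"  # assumed checkpoint
model = Qwen2VLForConditionalGeneration.from_pretrained(model_id, torch_dtype="auto", device_map="auto")
processor = AutoProcessor.from_pretrained(model_id)

query = "What was the total revenue in 2023?"
page_image = retrieve_best_page(query)  # hypothetical: returns a PIL image of the top-ranked page

messages = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": query}]}]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=[prompt], images=[page_image], return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=128)
print(processor.batch_decode(out[:, inputs["input_ids"].shape[-1]:], skip_special_tokens=True)[0])
```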
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5520, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/gRQUP4l8E5DzT2N-PeNYx.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "adorkin", "web3builder", "John6666", "Percifal", "jrmasiero", "rwightman", "seek007", "abishekcodes", "zliu", "AI4Industry", "louisbrulenaudet", "byteprobe", "muhtasham", "rumbleFTW" ], "count": 14 }, { "reaction": "๐Ÿ”ฅ", "users": [ "umair894", "abishekcodes", "fsommers", "jithinrocs", "rumbleFTW" ], "count": 5 }, { "reaction": "โค๏ธ", "users": [ "Csplk", "rumbleFTW", "madstuntman11" ], "count": 3 } ]
2024-09-05T09:17:38.000Z
2024-09-21T20:09:39.856Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6444b3135af87c73bbbd7447/-WLquJY3E1KZSJbnYUkwD.jpeg", "fullname": "Frank Sommers", "name": "fsommers", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 7, "isFollowing": false } ]
/posts/merve/995511131459162
3,796
2
866472607836541
[ { "type": "text", "value": "๐Ÿ”ฅ Dataset Viber 0.3 launches with Synthesizer to synthesise data with a human in the loop, for free, using open source models with Argilla's distilabel but within a quick-and-easy Gradio Interface.", "raw": "๐Ÿ”ฅ Dataset Viber 0.3 launches with Synthesizer to synthesise data with a human in the loop, for free, using open source models with Argilla's distilabel but within a quick-and-easy Gradio Interface.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Why? Not trying to be all fancy and formal just to iterate on your data and to get familiar with your prompts and the produced data. Under the hood, it relies on Hugging Face Inference endpoints and the latest LLMs and VLMs like Meta Llama 3.1 and BlackForest Labs Flux models.", "raw": "Why? Not trying to be all fancy and formal just to iterate on your data and to get familiar with your prompts and the produced data. Under the hood, it relies on Hugging Face Inference endpoints and the latest LLMs and VLMs like Meta Llama 3.1 and BlackForest Labs Flux models.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "An addition to the other Interfaces that are already support.", "raw": "An addition to the other Interfaces that are already support.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- CollectorInterface: Lazily collect data of model interactions without human annotation.", "raw": "- CollectorInterface: Lazily collect data of model interactions without human annotation.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- AnnotatorInterface: Walk through your data and annotate it with models in the loop.", "raw": "- AnnotatorInterface: Walk through your data and annotate it with models in the loop.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Synthesizer: Synthesize data with distilabel in the loop.", "raw": "- Synthesizer: Synthesize data with distilabel in the loop.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", 
"resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- BulkInterface: Explore your data distribution and annotate in bulk.", "raw": "- BulkInterface: Explore your data distribution and annotate in bulk.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โญ๏ธ Give some good vibes: ", "raw": "โญ๏ธ Give some good vibes: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/davidberenstein1957/dataset-viber", "resource": null, "url": null, "href": "https://github.com/davidberenstein1957/dataset-viber", "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿ”ฅ Dataset Viber 0.3 launches with Synthesizer to synthesise data with a human in the loop, for free, using open source models with Argilla's distilabel but within a quick-and-easy Gradio Interface. Why? No need to be all fancy and formal just to iterate on your data and to get familiar with your prompts and the produced data. Under the hood, it relies on Hugging Face Inference endpoints and the latest LLMs and VLMs like Meta Llama 3.1 and Black Forest Labs Flux models. It is an addition to the other interfaces that are already supported: - CollectorInterface: Lazily collect data of model interactions without human annotation. - AnnotatorInterface: Walk through your data and annotate it with models in the loop. - Synthesizer: Synthesize data with distilabel in the loop. - BulkInterface: Explore your data distribution and annotate in bulk. โญ๏ธ Give some good vibes: https://github.com/davidberenstein1957/dataset-viber
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 148, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/634ff41ff32062e9eb7b06a3/hXo1fjJ_P7vCKo2brM5HW.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-05T08:48:03.000Z
2024-09-05T08:48:03.787Z
[]
/posts/davidberenstein1957/866472607836541
293
0
579064956863993
[ { "type": "text", "value": "Datapluck: Portability Tool for Huggingface Datasets", "raw": "Datapluck: Portability Tool for Huggingface Datasets", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "\"I found myself recently whipping up notebooks just to pull huggingface datasets locally, annotate or operate changes and update them again. This happened often enough that I made a cli tool out of it, which I've been using successfully for the last few months.", "raw": "\"I found myself recently whipping up notebooks just to pull huggingface datasets locally, annotate or operate changes and update them again. This happened often enough that I made a cli tool out of it, which I've been using successfully for the last few months.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "While huggingface uses open formats, I found the official toolchain relatively low-level and not adapted to quick operations such as what I am doing.\"", "raw": "While huggingface uses open formats, I found the official toolchain relatively low-level and not adapted to quick operations such as what I am doing.\"", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "~ ", "raw": "~ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@omarkamali", "resource": null, "url": null, "href": null, "user": "omarkamali", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Link : ", "raw": "Link : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://omarkama.li/blog/datapluck", "resource": null, "url": null, "href": "https://omarkama.li/blog/datapluck", "user": null, "lang": null, "code": null, "label": null } ]
Datapluck: Portability Tool for Huggingface Datasets "I found myself recently whipping up notebooks just to pull huggingface datasets locally, annotate or operate changes and update them again. This happened often enough that I made a cli tool out of it, which I've been using successfully for the last few months. While huggingface uses open formats, I found the official toolchain relatively low-level and not adapted to quick operations such as what I am doing." ~ @omarkamali Link : https://omarkama.li/blog/datapluck
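For context, the notebook round-trip described in the quote looks roughly like this with the official `datasets` toolchain. The repo id and column name are placeholders, and this illustrates the workflow rather than datapluck's own interface, whose commands are documented at the link above.
```
# The pull -> modify -> push round-trip described above, using the official
# `datasets` toolchain. Repo id and the "label" column are placeholders; this
# illustrates the workflow, not datapluck's own CLI.
from datasets import load_dataset

ds = load_dataset("your-org/your-dataset", split="train")  # placeholder repo id

def normalize(row):
    row["label"] = row["label"].strip().lower()  # placeholder "annotation" step
    return row

ds = ds.map(normalize)
ds.push_to_hub("your-org/your-dataset")  # requires a prior `huggingface-cli login`
```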
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626237d9bbcbd1c34f1bb231/EJrOjvAL-68qMCYdnvOrq.png", "fullname": "Ali El Filali", "name": "alielfilali01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 188, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/665cc58d164b78e36b655f25/yiyOVgR3YKe_qNa5xEmu-.jpeg", "fullname": "Omar Kamali", "name": "omarkamali", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 } ]
[ { "reaction": "โค๏ธ", "users": [ "omarkamali", "abdeljalilELmajjodi", "louisbrulenaudet" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "async0x42" ], "count": 2 } ]
2024-09-05T04:13:36.000Z
2024-09-05T12:17:30.997Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/665cc58d164b78e36b655f25/yiyOVgR3YKe_qNa5xEmu-.jpeg", "fullname": "Omar Kamali", "name": "omarkamali", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/alielfilali01/579064956863993
1,086
1
290847981802358
[ { "type": "text", "value": "Just wrapped up a deep dive into the latest lecture on building LLMs, such as ChatGPT, from ", "raw": "Just wrapped up a deep dive into the latest lecture on building LLMs, such as ChatGPT, from ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@Stanford", "resource": null, "url": null, "href": null, "user": "Stanford", "lang": null, "code": null, "label": null }, { "type": "text", "value": " CS229 course. Here are my top takeaways:", "raw": " CS229 course. Here are my top takeaways:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ” Understanding the Components: LLMs like ChatGPT, Claude, and others are more than just neural networks; they are a complex blend of architecture, training loss, data evaluation, and systems. Knowing how these components work together is key to improving and scaling these models.", "raw": "๐Ÿ” Understanding the Components: LLMs like ChatGPT, Claude, and others are more than just neural networks; they are a complex blend of architecture, training loss, data evaluation, and systems. Knowing how these components work together is key to improving and scaling these models.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ“Š Scaling Matters: Performance improves predictably with more data, bigger models, and greater computational power. However, balancing these factors is crucial to avoid overfitting and resource waste.", "raw": "๐Ÿ“Š Scaling Matters: Performance improves predictably with more data, bigger models, and greater computational power. However, balancing these factors is crucial to avoid overfitting and resource waste.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ“ˆ Data is King: LLMs are trained on trillions of tokens scraped from the internet, but the quality of this data matters immensely. Rigorous filtering and deduplication processes are essential to maintaining data integrity.", "raw": "๐Ÿ“ˆ Data is King: LLMs are trained on trillions of tokens scraped from the internet, but the quality of this data matters immensely. 
Rigorous filtering and deduplication processes are essential to maintaining data integrity.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ—๏ธ Pre-Training vs. Post-Training: While pre-training equips the model with general knowledge, post-training (like RLHF) fine-tunes it to follow human-like responses, reducing toxic outputs and improving alignment with human values.", "raw": "๐Ÿ—๏ธ Pre-Training vs. Post-Training: While pre-training equips the model with general knowledge, post-training (like RLHF) fine-tunes it to follow human-like responses, reducing toxic outputs and improving alignment with human values.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸŒ Reinforcement Learning from Human Feedback (RLHF): This technique allows LLMs to maximize outputs that align with human preferences, making models more reliable and accurate.", "raw": "๐ŸŒ Reinforcement Learning from Human Feedback (RLHF): This technique allows LLMs to maximize outputs that align with human preferences, making models more reliable and accurate.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ’ก Why It Matters: Understanding these processes not only helps us appreciate the complexity behind our everyday AI tools but also highlights the challenges and opportunities in the ever-evolving field of AI.", "raw": "๐Ÿ’ก Why It Matters: Understanding these processes not only helps us appreciate the complexity behind our everyday AI tools but also highlights the challenges and opportunities in the ever-evolving field of AI.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Whether youโ€™re in tech, data science, or just AI-curious, staying updated on these advancements is crucial. LLMs are not just transforming industries; theyโ€™re redefining the future of human-computer interaction!", "raw": "Whether youโ€™re in tech, data science, or just AI-curious, staying updated on these advancements is crucial. 
LLMs are not just transforming industries; theyโ€™re redefining the future of human-computer interaction!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I just realized this was almost 2 hours long...", "raw": "I just realized this was almost 2 hours long...", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Link: ", "raw": "Link: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/watch?v=9vM4p9NN0Ts", "resource": null, "url": null, "href": "https://www.youtube.com/watch?v=9vM4p9NN0Ts", "user": null, "lang": null, "code": null, "label": null } ]
Just wrapped up a deep dive into the latest lecture on building LLMs, such as ChatGPT, from @Stanford CS229 course. Here are my top takeaways: ๐Ÿ” Understanding the Components: LLMs like ChatGPT, Claude, and others are more than just neural networks; they are a complex blend of architecture, training loss, data evaluation, and systems. Knowing how these components work together is key to improving and scaling these models. ๐Ÿ“Š Scaling Matters: Performance improves predictably with more data, bigger models, and greater computational power. However, balancing these factors is crucial to avoid overfitting and resource waste. ๐Ÿ“ˆ Data is King: LLMs are trained on trillions of tokens scraped from the internet, but the quality of this data matters immensely. Rigorous filtering and deduplication processes are essential to maintaining data integrity. ๐Ÿ—๏ธ Pre-Training vs. Post-Training: While pre-training equips the model with general knowledge, post-training (like RLHF) fine-tunes it to follow human-like responses, reducing toxic outputs and improving alignment with human values. ๐ŸŒ Reinforcement Learning from Human Feedback (RLHF): This technique allows LLMs to maximize outputs that align with human preferences, making models more reliable and accurate. ๐Ÿ’ก Why It Matters: Understanding these processes not only helps us appreciate the complexity behind our everyday AI tools but also highlights the challenges and opportunities in the ever-evolving field of AI. Whether youโ€™re in tech, data science, or just AI-curious, staying updated on these advancements is crucial. LLMs are not just transforming industries; theyโ€™re redefining the future of human-computer interaction! I just realized this was almost 2 hours long... Link: https://www.youtube.com/watch?v=9vM4p9NN0Ts
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 197, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/rLGOeupSDU6QEGWMEkQuB.png" } ]
[]
[ { "reaction": "โค๏ธ", "users": [ "dongnt", "alielfilali01", "Joseph717171", "dsmonk", "louisbrulenaudet" ], "count": 5 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Joseph717171" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "lamhieu" ], "count": 1 } ]
2024-09-04T21:37:25.000Z
2024-09-06T10:00:29.344Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6569216f9c96f1a47bf45788/mCLqmAs4dOjKdxNQVAp1w.png", "fullname": "Sica Rius", "name": "SicariusSicariiStuff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 132, "isFollowing": false }, { "avatarUrl": "/avatars/ea4398745974d781ae9dc0e95b12cabe.svg", "fullname": "Joseph", "name": "Joseph717171", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 22, "isFollowing": false }, { "avatarUrl": "/avatars/4d77428c302dc8866e0073c3ce667323.svg", "fullname": "vhjghvy uyfyfuyfy", "name": "WbjuSrceu", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/singhsidhukuldeep/290847981802358
1,628
3
993063646272657
[ { "type": "text", "value": "I just bought HF Pro but i don't know how many request per month i can get, if i request 1 time every 5s, around 2k token, is the pro account enough?, thanks for reading", "raw": "I just bought HF Pro but i don't know how many request per month i can get, if i request 1 time every 5s, around 2k token, is the pro account enough?, thanks for reading", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
I just bought HF Pro but I don't know how many requests per month I can get. If I request once every 5 seconds, around 2k tokens each, is the Pro account enough? Thanks for reading
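For what it's worth, the arithmetic implied by the question is simple; the sketch below only computes the request volume and says nothing about the actual quota attached to a Pro plan, which is not stated in the post.
```
# Load implied by the question: one request every 5 seconds, ~2k tokens each.
# This only computes the volume; it does not state any actual Pro plan quota.
seconds_per_month = 60 * 60 * 24 * 30
requests_per_month = seconds_per_month // 5       # 518,400 requests
tokens_per_month = requests_per_month * 2_000     # ~1.04e9 tokens
print(requests_per_month, tokens_per_month)
```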
{ "avatarUrl": "/avatars/bbaffa3a6cfe0fc224d02d4dc8454886.svg", "fullname": "Cao Trong Thang", "name": "fptisthebest", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "fptisthebest", "davidberenstein1957", "Tonic" ], "count": 4 } ]
2024-09-04T21:19:51.000Z
2024-09-05T02:40:17.234Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 384, "isFollowing": false } ]
/posts/fptisthebest/993063646272657
851
1
254051507992365
[ { "type": "text", "value": "My tool calling playgrounds repo has been updated again to include the use of flux1-schnell or dev image generation. This functionality is similar to using Dall-E 3 via the ", "raw": "My tool calling playgrounds repo has been updated again to include the use of flux1-schnell or dev image generation. This functionality is similar to using Dall-E 3 via the ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`@`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "@", "label": null }, { "type": "text", "value": " decorator in ChatGPT. Once the function is selected, the model will either extract or improve your prompt (depending on how you ask).", "raw": " decorator in ChatGPT. Once the function is selected, the model will either extract or improve your prompt (depending on how you ask).", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I have also included 2 notebooks that cover different ways to access Flux for your specific use case. The first method covers how to access flux via LitServe from Lightning AI. LitServe is a bare-bones inference engine with a focus on modularity rather than raw performance. LitServe supports text generation models as well as image generation, which is great for some use cases, but does not provide the caching mechanisms from a dedicated image generation solution. ", "raw": "I have also included 2 notebooks that cover different ways to access Flux for your specific use case. The first method covers how to access flux via LitServe from Lightning AI. LitServe is a bare-bones inference engine with a focus on modularity rather than raw performance. LitServe supports text generation models as well as image generation, which is great for some use cases, but does not provide the caching mechanisms from a dedicated image generation solution. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Since dedicated caching mechanisms are so crucial to performance, I also included an example for how to integrate SwarmUI/ComfyUI to utilize a more dedicated infrastructure that may already be running as part of your tech stack. Resulting in a Llama-3.1 capable of utilizing specific ComfyUI JSON configs, and many different settings. ", "raw": "Since dedicated caching mechanisms are so crucial to performance, I also included an example for how to integrate SwarmUI/ComfyUI to utilize a more dedicated infrastructure that may already be running as part of your tech stack. Resulting in a Llama-3.1 capable of utilizing specific ComfyUI JSON configs, and many different settings. 
", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Lastly, I tested the response times for each over a small batch request to simulate a speed test.", "raw": "Lastly, I tested the response times for each over a small batch request to simulate a speed test.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "It becomes clear quickly how efficient caching mechanisms can greatly reduce the generation time, even in a scenario where another model is called. An average 4.5 second response time is not bad at all when you consider that an 8B model is calling a 12B parameter model for a secondary generation.", "raw": "It becomes clear quickly how efficient caching mechanisms can greatly reduce the generation time, even in a scenario where another model is called. An average 4.5 second response time is not bad at all when you consider that an 8B model is calling a 12B parameter model for a secondary generation.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Repo: ", "raw": "Repo: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/tdolan21/tool-calling-playground", "resource": null, "url": null, "href": "https://github.com/tdolan21/tool-calling-playground", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "LitServe: ", "raw": "LitServe: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/Lightning-AI/LitServe", "resource": null, "url": null, "href": "https://github.com/Lightning-AI/LitServe", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "SwarmUI: ", "raw": "SwarmUI: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/mcmonkeyprojects/SwarmUI", "resource": null, "url": null, "href": "https://github.com/mcmonkeyprojects/SwarmUI", 
"user": null, "lang": null, "code": null, "label": null } ]
My tool-calling playground repo has been updated again to include flux1-schnell or dev image generation. This functionality is similar to using Dall-E 3 via the `@` decorator in ChatGPT. Once the function is selected, the model will either extract or improve your prompt (depending on how you ask). I have also included two notebooks that cover different ways to access Flux for your specific use case. The first method covers how to access Flux via LitServe from Lightning AI. LitServe is a bare-bones inference engine with a focus on modularity rather than raw performance. LitServe supports text generation models as well as image generation, which is great for some use cases, but it does not provide the caching mechanisms of a dedicated image generation solution. Since dedicated caching mechanisms are so crucial to performance, I also included an example of how to integrate SwarmUI/ComfyUI to utilize a more dedicated infrastructure that may already be running as part of your tech stack. The result is a Llama-3.1 agent capable of utilizing specific ComfyUI JSON configs and many different settings. Lastly, I tested the response times for each over a small batch request to simulate a speed test. It quickly becomes clear how efficient caching mechanisms can greatly reduce generation time, even in a scenario where another model is called. An average 4.5-second response time is not bad at all when you consider that an 8B model is calling a 12B-parameter model for a secondary generation. Repo: https://github.com/tdolan21/tool-calling-playground LitServe: https://github.com/Lightning-AI/LitServe SwarmUI: https://github.com/mcmonkeyprojects/SwarmUI
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6455cc8d679315e4ef16fbec/M6Cfifn05BUzkCFd2QDIT.png", "fullname": "Tim Dolan", "name": "macadeliccc", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 152, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6455cc8d679315e4ef16fbec/Fhl8PQ2daHSCs9bQkvRTo.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6455cc8d679315e4ef16fbec/Fo3QQLzYVJMT-eqKxxUAX.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 }, { "reaction": "๐Ÿ”ฅ", "users": [ "louisbrulenaudet" ], "count": 1 } ]
2024-09-04T17:01:20.000Z
2024-09-04T17:01:20.418Z
[]
/posts/macadeliccc/254051507992365
938
0
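Below is a rough, untested sketch of the kind of client call a LitServe image-generation route like the one described above could use. The host, port, the `/predict` path (LitServe's default route), and the `prompt`/`image` payload keys are assumptions that depend on how the server's `decode_request`/`encode_response` hooks are written; they are not taken from the repo.

```
# Hypothetical client for a locally running LitServe image-generation endpoint.
# Endpoint path and JSON keys are assumptions; adjust them to the actual server code.
import base64
import requests

def generate_image(prompt: str, out_path: str = "flux_output.png") -> str:
    resp = requests.post(
        "http://localhost:8000/predict",  # assumed host/port and default LitServe route
        json={"prompt": prompt},          # assumed request schema
        timeout=120,
    )
    resp.raise_for_status()
    image_b64 = resp.json()["image"]      # assumed response key (base64-encoded image)
    with open(out_path, "wb") as f:
        f.write(base64.b64decode(image_b64))
    return out_path

if __name__ == "__main__":
    print(generate_image("a lighthouse at dusk, watercolor"))
```

A tool-calling agent would expose a function like this as one of its tools, so the LLM only has to produce the prompt string.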
267778050099092
[ { "type": "text", "value": "๐Ÿฅณ ๐—ง๐—ฟ๐—ฎ๐—ป๐˜€๐—ณ๐—ผ๐—ฟ๐—บ๐—ฒ๐—ฟ๐˜€ ๐—”๐—ด๐—ฒ๐—ป๐˜๐˜€ ๐—ป๐—ผ๐˜„ ๐˜€๐˜‚๐—ฝ๐—ฝ๐—ผ๐—ฟ๐˜๐˜€ ๐— ๐˜‚๐—น๐˜๐—ถ-๐—ฎ๐—ด๐—ฒ๐—ป๐˜ ๐˜€๐˜†๐˜€๐˜๐—ฒ๐—บ๐˜€!", "raw": "๐Ÿฅณ ๐—ง๐—ฟ๐—ฎ๐—ป๐˜€๐—ณ๐—ผ๐—ฟ๐—บ๐—ฒ๐—ฟ๐˜€ ๐—”๐—ด๐—ฒ๐—ป๐˜๐˜€ ๐—ป๐—ผ๐˜„ ๐˜€๐˜‚๐—ฝ๐—ฝ๐—ผ๐—ฟ๐˜๐˜€ ๐— ๐˜‚๐—น๐˜๐—ถ-๐—ฎ๐—ด๐—ฒ๐—ป๐˜ ๐˜€๐˜†๐˜€๐˜๐—ฒ๐—บ๐˜€!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Multi-agent systems have been introduced in Microsoft's framework Autogen. It simply means having several agents working together to solve your task instead of only one : this paradigm empirically yields better performance on most benchmarks. The reason for this better performance is conceptually simple: for many tasks, rather than using a do-it-all system, you would prefer to specialize units on sub-tasks. Here, having agents with separate tool sets and memories allows to achieve efficient specialization.", "raw": "Multi-agent systems have been introduced in Microsoft's framework Autogen. It simply means having several agents working together to solve your task instead of only one : this paradigm empirically yields better performance on most benchmarks. The reason for this better performance is conceptually simple: for many tasks, rather than using a do-it-all system, you would prefer to specialize units on sub-tasks. Here, having agents with separate tool sets and memories allows to achieve efficient specialization.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "You can now easily build hierarchical multi-agent systems with transformers.agents (not released yet, use the dev version)", "raw": "You can now easily build hierarchical multi-agent systems with transformers.agents (not released yet, use the dev version)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "To do so, encapsulate the agent in a ManagedAgent object. This object needs arguments agent, name, and a description, which will then be embedded in the manager agent's system prompt to let it know how to call this managed agent, as we also do for tools.", "raw": "To do so, encapsulate the agent in a ManagedAgent object. 
This object needs arguments agent, name, and a description, which will then be embedded in the manager agent's system prompt to let it know how to call this managed agent, as we also do for tools.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Cf the example in the image! We'll keep building on this paradigm in the upcoming weeks ๐Ÿš€", "raw": "Cf the example in the image! We'll keep building on this paradigm in the upcoming weeks ๐Ÿš€", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Read more in the doc ๐Ÿ‘‰ ", "raw": "Read more in the doc ๐Ÿ‘‰ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/huggingface/transformers/blob/main/docs/source/en/agents_advanced.md", "resource": null, "url": null, "href": "https://github.com/huggingface/transformers/blob/main/docs/source/en/agents_advanced.md", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Checkout an advanced multi-agent system that tops the GAIA leaderboard ๐Ÿ‘‰ ", "raw": "Checkout an advanced multi-agent system that tops the GAIA leaderboard ๐Ÿ‘‰ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/aymeric-roucher/GAIA/blob/main/gaia_multiagent.py", "resource": null, "url": null, "href": "https://github.com/aymeric-roucher/GAIA/blob/main/gaia_multiagent.py", "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿฅณ ๐—ง๐—ฟ๐—ฎ๐—ป๐˜€๐—ณ๐—ผ๐—ฟ๐—บ๐—ฒ๐—ฟ๐˜€ ๐—”๐—ด๐—ฒ๐—ป๐˜๐˜€ ๐—ป๐—ผ๐˜„ ๐˜€๐˜‚๐—ฝ๐—ฝ๐—ผ๐—ฟ๐˜๐˜€ ๐— ๐˜‚๐—น๐˜๐—ถ-๐—ฎ๐—ด๐—ฒ๐—ป๐˜ ๐˜€๐˜†๐˜€๐˜๐—ฒ๐—บ๐˜€! Multi-agent systems have been introduced in Microsoft's framework Autogen. It simply means having several agents working together to solve your task instead of only one : this paradigm empirically yields better performance on most benchmarks. The reason for this better performance is conceptually simple: for many tasks, rather than using a do-it-all system, you would prefer to specialize units on sub-tasks. Here, having agents with separate tool sets and memories allows to achieve efficient specialization. You can now easily build hierarchical multi-agent systems with transformers.agents (not released yet, use the dev version) To do so, encapsulate the agent in a ManagedAgent object. This object needs arguments agent, name, and a description, which will then be embedded in the manager agent's system prompt to let it know how to call this managed agent, as we also do for tools. Cf the example in the image! We'll keep building on this paradigm in the upcoming weeks ๐Ÿš€ Read more in the doc ๐Ÿ‘‰ https://github.com/huggingface/transformers/blob/main/docs/source/en/agents_advanced.md Checkout an advanced multi-agent system that tops the GAIA leaderboard ๐Ÿ‘‰ https://github.com/aymeric-roucher/GAIA/blob/main/gaia_multiagent.py
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 476, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/zE2JkQiVNMx9HS_vs1NWd.png" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "ibrahim313", "John6666", "osanseviero", "Kaoeiri", "dsmonk", "Csplk", "KingNish", "whitebill", "Winnougan" ], "count": 9 }, { "reaction": "๐Ÿค—", "users": [ "louisbrulenaudet", "Kaoeiri", "KingNish" ], "count": 3 } ]
2024-09-04T16:49:06.000Z
2024-09-04T16:49:06.292Z
[]
/posts/m-ric/267778050099092
2,130
0
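A minimal sketch of the pattern described in the post above, assuming the dev-version import paths for transformers.agents; the `web_search` name and the choice of search tool are illustrative, not prescribed.

```
# Untested sketch: a manager agent delegating to a managed web-search agent.
# Requires the transformers dev version; import paths may differ between releases.
from transformers.agents import (
    DuckDuckGoSearchTool,
    HfApiEngine,
    ManagedAgent,
    ReactCodeAgent,
)

llm_engine = HfApiEngine()  # hosted model via the HF Inference API

# A specialized agent with its own tool set
web_agent = ReactCodeAgent(tools=[DuckDuckGoSearchTool()], llm_engine=llm_engine)

# Wrap it with agent, name, and description so a manager can call it like a tool
managed_web_agent = ManagedAgent(
    agent=web_agent,
    name="web_search",
    description="Runs web searches for you. Give it your query as an argument.",
)

# The manager agent orchestrates the managed agent
manager_agent = ReactCodeAgent(
    tools=[], llm_engine=llm_engine, managed_agents=[managed_web_agent]
)

manager_agent.run("Who is the CEO of Hugging Face?")
```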
169227177418296
[ { "type": "text", "value": "the new version of Enigma, our code-instruct specialist, is out now:", "raw": "the new version of Enigma, our code-instruct specialist, is out now:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- ", "raw": "- ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/ValiantLabs/Llama3.1-8B-Enigma", "resource": { "type": "model", "id": "ValiantLabs/Llama3.1-8B-Enigma", "discussionNum": null }, "url": "https://huggingface.co/ValiantLabs/Llama3.1-8B-Enigma", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " is trained on code-instruct and general chat data.", "raw": " is trained on code-instruct and general chat data.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- the updated code-instruct database is available now as well: ", "raw": "- the updated code-instruct database is available now as well: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/sequelbox/Tachibana", "resource": { "type": "dataset", "id": "sequelbox/Tachibana", "discussionNum": null }, "url": "https://huggingface.co/datasets/sequelbox/Tachibana", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "more to come soon!", "raw": "more to come soon!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
the new version of Enigma, our code-instruct specialist, is out now: - https://huggingface.co/ValiantLabs/Llama3.1-8B-Enigma is trained on code-instruct and general chat data. - the updated code-instruct dataset is available now as well: https://huggingface.co/datasets/sequelbox/Tachibana more to come soon!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63444f2687964b331809eb55/WvZivsvKsM_t0tBtakovK.png", "fullname": "t.d.a.g.", "name": "sequelbox", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 50, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "djuna" ], "count": 2 } ]
2024-09-04T16:23:27.000Z
2024-09-04T16:23:27.875Z
[]
/posts/sequelbox/169227177418296
711
0
317300660282714
[ { "type": "text", "value": "๐ŸฃAi2 Releasing OLMoE! ", "raw": "๐ŸฃAi2 Releasing OLMoE! ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "OLMoE-1B-7B-Instruct is a Mixture-of-Experts LLM with 1B active and 7B total parameters, and, OLMoE is 100% open-source in model, code-base, datasets!", "raw": "OLMoE-1B-7B-Instruct is a Mixture-of-Experts LLM with 1B active and 7B total parameters, and, OLMoE is 100% open-source in model, code-base, datasets!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿฆ–Paper: ", "raw": "๐Ÿฆ–Paper: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2409.02060", "resource": null, "url": null, "href": "https://arxiv.org/abs/2409.02060", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿค—Model: ", "raw": "๐Ÿค—Model: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/allenai/OLMoE-1B-7B-0924-Instruct", "resource": { "type": "model", "id": "allenai/OLMoE-1B-7B-0924-Instruct", "discussionNum": null }, "url": "https://huggingface.co/allenai/OLMoE-1B-7B-0924-Instruct", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ’พDatasets: ", "raw": "๐Ÿ’พDatasets: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/allenai/OLMoE-mix-0924", "resource": { "type": "dataset", "id": "allenai/OLMoE-mix-0924", "discussionNum": null }, "url": "https://huggingface.co/datasets/allenai/OLMoE-mix-0924", "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐ŸฃAi2 Releasing OLMoE! OLMoE-1B-7B-Instruct is a Mixture-of-Experts LLM with 1B active and 7B total parameters, and, OLMoE is 100% open-source in model, code-base, datasets! ๐Ÿฆ–Paper: https://arxiv.org/abs/2409.02060 ๐Ÿค—Model: https://huggingface.co/allenai/OLMoE-1B-7B-0924-Instruct ๐Ÿ’พDatasets: https://huggingface.co/datasets/allenai/OLMoE-mix-0924
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/642827944fe87caede802784/a7s3Ub9Cy6-PuuaX8wwXm.png", "fullname": "VILARIN", "name": "vilarin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 68, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "orrinin", "YaTharThShaRma999", "John6666", "osanseviero", "den0620", "louisbrulenaudet" ], "count": 6 }, { "reaction": "๐Ÿš€", "users": [ "sequelbox" ], "count": 1 } ]
2024-09-04T15:48:41.000Z
2024-09-06T08:54:44.424Z
[]
/posts/vilarin/317300660282714
1,580
0
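For reference, a standard transformers chat-generation sketch for the instruct checkpoint linked above; it assumes a transformers version with OLMoE support and is untested here.

```
# Untested sketch: generate with OLMoE-1B-7B-0924-Instruct via transformers.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "allenai/OLMoE-1B-7B-0924-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

messages = [{"role": "user", "content": "Explain mixture-of-experts in two sentences."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(inputs, max_new_tokens=128)
# Decode only the newly generated tokens, not the prompt
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```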
761761396766140
[ { "type": "text", "value": "If you want a clear understanding of the environmental impacts of AI throughout its entire lifecycle, this primer by ", "raw": "If you want a clear understanding of the environmental impacts of AI throughout its entire lifecycle, this primer by ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@sasha", "resource": null, "url": null, "href": null, "user": "sasha", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@brunatrevelin", "resource": null, "url": null, "href": null, "user": "brunatrevelin", "lang": null, "code": null, "label": null }, { "type": "text", "value": " and ", "raw": " and ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@meg", "resource": null, "url": null, "href": null, "user": "meg", "lang": null, "code": null, "label": null }, { "type": "text", "value": " is a must-read.", "raw": " is a must-read.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "It brilliantly explains which types of impacts occur, when they happen, and why they matter.", "raw": "It brilliantly explains which types of impacts occur, when they happen, and why they matter.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/sasha/ai-environment-primer", "resource": null, "url": null, "href": "https://huggingface.co/blog/sasha/ai-environment-primer", "user": null, "lang": null, "code": null, "label": null } ]
If you want a clear understanding of the environmental impacts of AI throughout its entire lifecycle, this primer by @sasha @brunatrevelin and @meg is a must-read. It brilliantly explains which types of impacts occur, when they happen, and why they matter. https://huggingface.co/blog/sasha/ai-environment-primer
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 364, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/651ea296c887c687e09158af/ju9Zx2xDBVhDLnLL1e1Mq.jpeg", "fullname": "Bruna Trevelin", "name": "brunatrevelin", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 35 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1626214544196-60c757ea5f9a76ab3f844f12.png", "fullname": "Margaret Mitchell", "name": "meg", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 96 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60edd0133e2c73a9a21455f5/yK1G-Fv-YjYb7v_chkz3p.jpeg", "fullname": "Sasha Luccioni", "name": "sasha", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 156 } ]
[ { "reaction": "โค๏ธ", "users": [ "brunatrevelin", "John6666", "not-lain", "BrigitteTousi", "louisbrulenaudet" ], "count": 5 } ]
2024-09-04T15:48:16.000Z
2024-09-04T15:48:16.042Z
[]
/posts/fdaudens/761761396766140
899
0
964871105613632
[ { "type": "text", "value": "The new Qwen-2 VL models seem to perform quite well in object detection. You can prompt them to respond with bounding boxes in a reference frame of 1k x 1k pixels and scale those boxes to the original image size.", "raw": "The new Qwen-2 VL models seem to perform quite well in object detection. You can prompt them to respond with bounding boxes in a reference frame of 1k x 1k pixels and scale those boxes to the original image size.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "You can try it out with my space ", "raw": "You can try it out with my space ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/maxiw/Qwen2-VL-Detection", "resource": { "type": "space", "id": "maxiw/Qwen2-VL-Detection", "discussionNum": null }, "url": "https://huggingface.co/spaces/maxiw/Qwen2-VL-Detection", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
The new Qwen-2 VL models seem to perform quite well in object detection. You can prompt them to respond with bounding boxes in a reference frame of 1k x 1k pixels and scale those boxes to the original image size. You can try it out with my space https://huggingface.co/spaces/maxiw/Qwen2-VL-Detection
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6313a26b2c7ffdd9f50187ed/MTBOHg2bMcuOMWFLCZ86L.png", "fullname": "Maxi", "name": "maxiw", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 48, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6313a26b2c7ffdd9f50187ed/Z23y8kJLAGbYgaF95CfyX.png" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "de-Rodrigo", "dsmonk", "tosouth", "hxypqr", "mrdbourke", "SvPolina", "akazakov", "Panerlu", "iiBLACKii" ], "count": 9 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "Greenbean", "denizaybey", "YaTharThShaRma999", "rwightman" ], "count": 5 }, { "reaction": "๐Ÿค—", "users": [ "thusinh1969" ], "count": 1 } ]
2024-09-04T14:06:12.000Z
2024-10-06T08:27:03.648Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6313a26b2c7ffdd9f50187ed/MTBOHg2bMcuOMWFLCZ86L.png", "fullname": "Maxi", "name": "maxiw", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 48, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64660c1ca1a19b0623fcf84c/wKZW7gdXufDO8xJ4NsOVV.jpeg", "fullname": "YCX", "name": "fridayfairy", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/642cf7abab0cc792e43b8497/Io06Gn7ERvz2N9QMo0CBY.jpeg", "fullname": "Nguyแป…n Anh Nguyรชn", "name": "thusinh1969", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 7, "isFollowing": false } ]
/posts/maxiw/964871105613632
2,305
4
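The rescaling step mentioned in the post is simple arithmetic: boxes come back in a 1000x1000 reference frame and are mapped onto the real image dimensions. A small sketch follows; the (x1, y1, x2, y2) coordinate order is an assumption, so match it to whatever format you prompt the model for.

```
# Scale boxes from Qwen2-VL's 0..1000 reference frame to the original image size.
from typing import List, Tuple

Box = Tuple[float, float, float, float]  # (x1, y1, x2, y2) in 0..1000

def scale_boxes(boxes: List[Box], image_width: int, image_height: int) -> List[Tuple[int, int, int, int]]:
    scaled = []
    for x1, y1, x2, y2 in boxes:
        scaled.append((
            round(x1 / 1000 * image_width),
            round(y1 / 1000 * image_height),
            round(x2 / 1000 * image_width),
            round(y2 / 1000 * image_height),
        ))
    return scaled

# Example: a box predicted on a 1920x1080 photo
print(scale_boxes([(100, 250, 400, 900)], 1920, 1080))  # [(192, 270, 768, 972)]
```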
917987360905988
[ { "type": "text", "value": "A few weeks ago, we uploaded the MERIT Dataset ๐ŸŽ’๐Ÿ“ƒ๐Ÿ† into Hugging Face ๐Ÿค—!", "raw": "A few weeks ago, we uploaded the MERIT Dataset ๐ŸŽ’๐Ÿ“ƒ๐Ÿ† into Hugging Face ๐Ÿค—!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Now, we are excited to share the Merit Dataset paper via arXiv! ๐Ÿ“ƒ๐Ÿ’ซ", "raw": "Now, we are excited to share the Merit Dataset paper via arXiv! ๐Ÿ“ƒ๐Ÿ’ซ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2409.00447", "resource": { "type": "paper", "id": "2409.00447", "discussionNum": null }, "url": "https://huggingface.co/papers/2409.00447", "href": null, "user": null, "lang": null, "code": null, "label": "The MERIT Dataset: Modelling and Efficiently Rendering Interpretable\n Transcripts (2409.00447)" }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "The MERIT Dataset is a fully synthetic, labeled dataset created for training and benchmarking LLMs on Visually Rich Document Understanding tasks. It is also designed to help detect biases and improve interpretability in LLMs, where we are actively working. ๐Ÿ”ง๐Ÿ”จ", "raw": "The MERIT Dataset is a fully synthetic, labeled dataset created for training and benchmarking LLMs on Visually Rich Document Understanding tasks. It is also designed to help detect biases and improve interpretability in LLMs, where we are actively working. ๐Ÿ”ง๐Ÿ”จ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "MERIT contains synthetically rendered students' transcripts of records from different schools in English and Spanish. We plan to expand the dataset into different contexts (synth medical/insurance documents, synth IDS, etc.) Want to collaborate? Do you have any feedback? ๐Ÿง", "raw": "MERIT contains synthetically rendered students' transcripts of records from different schools in English and Spanish. We plan to expand the dataset into different contexts (synth medical/insurance documents, synth IDS, etc.) Want to collaborate? Do you have any feedback? 
๐Ÿง", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Resources:", "raw": "Resources:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Dataset: ", "raw": "- Dataset: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/de-Rodrigo/merit", "resource": { "type": "dataset", "id": "de-Rodrigo/merit", "discussionNum": null }, "url": "https://huggingface.co/datasets/de-Rodrigo/merit", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Code and generation pipeline: ", "raw": "- Code and generation pipeline: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/nachoDRT/MERIT-Dataset", "resource": null, "url": null, "href": "https://github.com/nachoDRT/MERIT-Dataset", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "PD: We are grateful to Hugging Face ๐Ÿค— for providing the fantastic tools and resources we find in the platform and, more specifically, to ", "raw": "PD: We are grateful to Hugging Face ๐Ÿค— for providing the fantastic tools and resources we find in the platform and, more specifically, to ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@nielsr", "resource": null, "url": null, "href": null, "user": "nielsr", "lang": null, "code": null, "label": null }, { "type": "text", "value": " for sharing the fine-tuning/inference scripts we have used in our benchmark.", "raw": " for sharing the fine-tuning/inference scripts we have used in our benchmark.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
A few weeks ago, we uploaded the MERIT Dataset ๐ŸŽ’๐Ÿ“ƒ๐Ÿ† into Hugging Face ๐Ÿค—! Now, we are excited to share the Merit Dataset paper via arXiv! ๐Ÿ“ƒ๐Ÿ’ซ https://huggingface.co/papers/2409.00447 The MERIT Dataset is a fully synthetic, labeled dataset created for training and benchmarking LLMs on Visually Rich Document Understanding tasks. It is also designed to help detect biases and improve interpretability in LLMs, where we are actively working. ๐Ÿ”ง๐Ÿ”จ MERIT contains synthetically rendered students' transcripts of records from different schools in English and Spanish. We plan to expand the dataset into different contexts (synth medical/insurance documents, synth IDS, etc.) Want to collaborate? Do you have any feedback? ๐Ÿง Resources: - Dataset: https://huggingface.co/datasets/de-Rodrigo/merit - Code and generation pipeline: https://github.com/nachoDRT/MERIT-Dataset PD: We are grateful to Hugging Face ๐Ÿค— for providing the fantastic tools and resources we find in the platform and, more specifically, to @nielsr for sharing the fine-tuning/inference scripts we have used in our benchmark.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1676563169736-noauth.jpeg", "fullname": "de Rodrigo", "name": "de-Rodrigo", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63ee535a190ddd6214f30dc2/cdxZSF1f69iGUkmtydACh.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63ee535a190ddd6214f30dc2/Aio4dSOAFLkbSCPwz_DEO.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63ee535a190ddd6214f30dc2/QgeJUVQ07gHcMcfEBbWXm.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1608042047613-5f1158120c833276f61f1a84.jpeg", "fullname": "Niels Rogge", "name": "nielsr", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 669 } ]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "louisbrulenaudet" ], "count": 2 }, { "reaction": "๐Ÿ”ฅ", "users": [ "David-Egea" ], "count": 1 } ]
2024-09-04T13:30:30.000Z
2024-09-04T13:34:02.689Z
[]
/posts/de-Rodrigo/917987360905988
987
0
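To poke at the dataset, a minimal (untested) loading sketch; the split name is an assumption and the dataset may require a configuration name, so check the dataset card for the exact options.

```
# Untested sketch: load the MERIT dataset from the Hugging Face Hub.
from datasets import load_dataset

# A configuration name may be required; see the dataset card for available configs/splits.
ds = load_dataset("de-Rodrigo/merit", split="train")
print(ds)
print(ds[0].keys())  # inspect the label/annotation fields
```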
191046582909567
[ { "type": "mention", "value": null, "raw": "@victor", "resource": null, "url": null, "href": null, "user": "victor", "lang": null, "code": null, "label": null }, { "type": "text", "value": " Sorry for the repetitiveness.", "raw": " Sorry for the repetitiveness.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I'm not sure if Post is the right place to report such an error, but it seems to be a server error unrelated to the Zero GPU space error the other day, so I don't know where else to report it.", "raw": "I'm not sure if Post is the right place to report such an error, but it seems to be a server error unrelated to the Zero GPU space error the other day, so I don't know where else to report it.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Since this morning, I have been getting a strange error when running inference from space in Gradio 3.x.", "raw": "Since this morning, I have been getting a strange error when running inference from space in Gradio 3.x.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Yntec (", "raw": "Yntec (", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/Yntec", "resource": null, "url": null, "href": "https://huggingface.co/Yntec", "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": ") discovered it, but he is not in the Pro subscription, so I am reporting it on behalf of him.", "raw": ") discovered it, but he is not in the Pro subscription, so I am reporting it on behalf of him.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "The error message is as follows: 1girl and other prompts will show cached output, so experiment with unusual prompts.", "raw": "The error message is as follows: 1girl and other prompts will show cached output, so experiment with unusual prompts.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": 
null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Thank you in advance.", "raw": "Thank you in advance.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/John6666/blitz_diffusion_error", "resource": { "type": "space", "id": "John6666/blitz_diffusion_error", "discussionNum": null }, "url": "https://huggingface.co/spaces/John6666/blitz_diffusion_error", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/John6666/GPU-stresser-t2i-error", "resource": { "type": "space", "id": "John6666/GPU-stresser-t2i-error", "discussionNum": null }, "url": "https://huggingface.co/spaces/John6666/GPU-stresser-t2i-error", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "code_fence", "value": null, "raw": "```\nValueError: Could not complete request to HuggingFace API, Status Code: 500, Error: unknown error, Warnings: ['CUDA out of memory. Tried to allocate 30.00 MiB (GPU 0; 14.75 GiB total capacity; 1.90 GiB already allocated; 3.06 MiB free; 1.95 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF', 'There was an inference error: CUDA out of memory. Tried to allocate 30.00 MiB (GPU 0; 14.75 GiB total capacity; 1.90 GiB already allocated; 3.06 MiB free; 1.95 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF']\n```", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "ValueError: Could not complete request to HuggingFace API, Status Code: 500, Error: unknown error, Warnings: ['CUDA out of memory. Tried to allocate 30.00 MiB (GPU 0; 14.75 GiB total capacity; 1.90 GiB already allocated; 3.06 MiB free; 1.95 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF', 'There was an inference error: CUDA out of memory. Tried to allocate 30.00 MiB (GPU 0; 14.75 GiB total capacity; 1.90 GiB already allocated; 3.06 MiB free; 1.95 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF']", "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
@victor Sorry for the repetitiveness. I'm not sure if Post is the right place to report such an error, but it seems to be a server error unrelated to the Zero GPU space error the other day, so I don't know where else to report it. Since this morning, I have been getting a strange error when running inference from space in Gradio 3.x. Yntec (https://huggingface.co/Yntec) discovered it, but he is not in the Pro subscription, so I am reporting it on behalf of him. The error message is as follows: 1girl and other prompts will show cached output, so experiment with unusual prompts. Thank you in advance. https://huggingface.co/spaces/John6666/blitz_diffusion_error https://huggingface.co/spaces/John6666/GPU-stresser-t2i-error ``` ValueError: Could not complete request to HuggingFace API, Status Code: 500, Error: unknown error, Warnings: ['CUDA out of memory. Tried to allocate 30.00 MiB (GPU 0; 14.75 GiB total capacity; 1.90 GiB already allocated; 3.06 MiB free; 1.95 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF', 'There was an inference error: CUDA out of memory. Tried to allocate 30.00 MiB (GPU 0; 14.75 GiB total capacity; 1.90 GiB already allocated; 3.06 MiB free; 1.95 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF'] ```
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 384, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2578 } ]
[ { "reaction": "๐Ÿ‘€", "users": [ "victor", "julien-c", "AtAndDev" ], "count": 3 } ]
2024-09-04T12:58:53.000Z
2024-09-09T10:51:25.537Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63239b8370edc53f51cd5d42/88od0k-AAkxAIV-5ULwDs.png", "fullname": "Yn Tec", "name": "Yntec", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1994, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2578, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 384, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1621947938344-noauth.png", "fullname": "Abubakar Abid", "name": "abidlabs", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 482, "isFollowing": false }, { "avatarUrl": "/avatars/6bd14f36bf31ddc8c86cddd6d39d920e.svg", "fullname": "Juandiego Morzan", "name": "jdmorzan", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/John6666/191046582909567
8,762
14
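Since the traceback itself suggests max_split_size_mb, here is what that knob looks like when you control the runtime yourself. It only mitigates fragmentation-related OOMs, not a genuinely full GPU, the 128 MB value is a guess, and it will not help when the failure happens on the hosted Inference API side as in the report above.

```
# Sketch: set the allocator option mentioned in the traceback before CUDA initializes.
import os

os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"  # value is a guess

import torch  # import after setting the env var so the allocator picks it up

if torch.cuda.is_available():
    print(torch.cuda.memory_summary())
else:
    print("No GPU available")
```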
395501387301708
[ { "type": "text", "value": "I am integrating Azure Cosmos DB, the database system that backs GPT conversations into my workflow, and experimenting with new patterns to accelerate dataset evolution for evaluation and training of AI.", "raw": "I am integrating Azure Cosmos DB, the database system that backs GPT conversations into my workflow, and experimenting with new patterns to accelerate dataset evolution for evaluation and training of AI.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "While initially using it for research prompts and research outputs using my GPT-4o client here which can interface and search ArXiv, I am excited to try out some new features specifically for AI at scale. Research on memory augmentation is shown. ", "raw": "While initially using it for research prompts and research outputs using my GPT-4o client here which can interface and search ArXiv, I am excited to try out some new features specifically for AI at scale. Research on memory augmentation is shown. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/awacke1/GPT-4o-omni-text-audio-image-video", "resource": { "type": "space", "id": "awacke1/GPT-4o-omni-text-audio-image-video", "discussionNum": null }, "url": "https://huggingface.co/spaces/awacke1/GPT-4o-omni-text-audio-image-video", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/awacke1/AzureCosmosDBUI", "resource": { "type": "space", "id": "awacke1/AzureCosmosDBUI", "discussionNum": null }, "url": "https://huggingface.co/spaces/awacke1/AzureCosmosDBUI", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
I am integrating Azure Cosmos DB, the database system that backs GPT conversations, into my workflow and experimenting with new patterns to accelerate dataset evolution for evaluation and training of AI. While initially using it for research prompts and research outputs with my GPT-4o client here, which can interface with and search ArXiv, I am excited to try out some new features specifically for AI at scale. Research on memory augmentation is shown. https://huggingface.co/spaces/awacke1/GPT-4o-omni-text-audio-image-video https://huggingface.co/spaces/awacke1/AzureCosmosDBUI
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1656147940537-620630b603825909dcbeba35.jpeg", "fullname": "Aaron C Wacker", "name": "awacke1", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 184, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/7YNtYZ38tpsms_UklntR1.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-04T11:44:05.000Z
2024-09-04T11:44:05.531Z
[]
/posts/awacke1/395501387301708
588
0
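As an illustration of the kind of integration described above, a minimal azure-cosmos sketch that stores a prompt/response record and queries it back; the endpoint, key, database/container names, and item schema are placeholders, not the author's actual setup.

```
# Illustrative sketch only: write and query prompt/response items in Azure Cosmos DB.
import os
import uuid

from azure.cosmos import CosmosClient

client = CosmosClient(
    os.environ["COSMOS_ENDPOINT"],       # e.g. https://<account>.documents.azure.com
    credential=os.environ["COSMOS_KEY"],
)
container = client.get_database_client("research").get_container_client("prompts")

# Store one research prompt/output pair
container.upsert_item({
    "id": str(uuid.uuid4()),
    "prompt": "Summarize recent ArXiv work on memory augmentation.",
    "response": "...model output...",
})

# Read a few records back
for doc in container.query_items(
    query="SELECT TOP 5 c.id, c.prompt FROM c",
    enable_cross_partition_query=True,
):
    print(doc)
```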
506001462483816
[ { "type": "text", "value": "๐Ÿš€ Introducing Hugging Face's Multilingual Speech-to-Speech! ๐ŸŽค", "raw": "๐Ÿš€ Introducing Hugging Face's Multilingual Speech-to-Speech! ๐ŸŽค", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ’ฌOur modular, cross-platform pipeline to run GPT4o-like experiences on device can now seamlessly switch languages mid-conversation with an imperceptible 100ms delay.", "raw": "๐Ÿ’ฌOur modular, cross-platform pipeline to run GPT4o-like experiences on device can now seamlessly switch languages mid-conversation with an imperceptible 100ms delay.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸŒŸ Building on an amazing early reception with 2600 stars on GitHub ๐ŸŒŸ ", "raw": "๐ŸŒŸ Building on an amazing early reception with 2600 stars on GitHub ๐ŸŒŸ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿš€ We are expanding the library to support multiple languages ", "raw": "๐Ÿš€ We are expanding the library to support multiple languages ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ”ฅ Try it out with a flag: --language fr ", "raw": "๐Ÿ”ฅ Try it out with a flag: --language fr ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿคฏ Or don't set the flag and let the system detect the language ", "raw": "๐Ÿคฏ Or don't set the flag and let the system detect the language ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ’ก What feature should we add next?", "raw": "๐Ÿ’ก What feature should we add next?", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿš€ Introducing Hugging Face's Multilingual Speech-to-Speech! ๐ŸŽค ๐Ÿ’ฌOur modular, cross-platform pipeline to run GPT4o-like experiences on device can now seamlessly switch languages mid-conversation with an imperceptible 100ms delay. ๐ŸŒŸ Building on an amazing early reception with 2600 stars on GitHub ๐ŸŒŸ ๐Ÿš€ We are expanding the library to support multiple languages ๐Ÿ”ฅ Try it out with a flag: --language fr ๐Ÿคฏ Or don't set the flag and let the system detect the language ๐Ÿ’ก What feature should we add next?
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d66b494bbd0d92b641cdbb/6-7dm7B-JxcoS1QlCPdMN.jpeg", "fullname": "Andres Marafioti", "name": "andito", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 53, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/65d66b494bbd0d92b641cdbb/WbpkWi8OlJGXnL1kzmcqK.mp4" } ]
[]
[ { "reaction": "๐Ÿค—", "users": [ "prithivMLmods", "osanseviero", "John6666", "THEFIG" ], "count": 4 }, { "reaction": "๐Ÿ˜Ž", "users": [ "de-Rodrigo" ], "count": 1 }, { "reaction": "๐Ÿ”ฅ", "users": [ "Aurelien-Morgan" ], "count": 1 }, { "reaction": "๐Ÿ‘", "users": [ "dashfunnydashdash" ], "count": 1 } ]
2024-09-04T07:54:29.000Z
2024-09-04T07:54:44.640Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d66b494bbd0d92b641cdbb/6-7dm7B-JxcoS1QlCPdMN.jpeg", "fullname": "Andres Marafioti", "name": "andito", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 53, "isFollowing": false } ]
/posts/andito/506001462483816
1,573
1
314529831042259
[ { "type": "text", "value": "๐Ÿงญ Guided Reasoning", "raw": "๐Ÿงญ Guided Reasoning", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ‘‹Hi everyone, ", "raw": "๐Ÿ‘‹Hi everyone, ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "We've been releasing Guided Reasoning:", "raw": "We've been releasing Guided Reasoning:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Our AI guides walk your favorite LLM through complex reasoning problems.", "raw": "Our AI guides walk your favorite LLM through complex reasoning problems.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸŽฏ Goals:", "raw": "๐ŸŽฏ Goals:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "1๏ธโƒฃ Reliability. AIs consistently follow reasoning methods.", "raw": "1๏ธโƒฃ Reliability. AIs consistently follow reasoning methods.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "2๏ธโƒฃ Self-explainability. AIs see reasoning protocols and can explain internal deliberation.", "raw": "2๏ธโƒฃ Self-explainability. 
AIs see reasoning protocols and can explain internal deliberation.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "3๏ธโƒฃ Contestability. Users may amend AI reasoning and revise plausibility assessments.", "raw": "3๏ธโƒฃ Contestability. Users may amend AI reasoning and revise plausibility assessments.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Try out Guided Reasoning with our light demo chatbot, powered by ๐Ÿค— HuggingFace's free Inference Api and small LLMs. (Sorry for poor latency and limited availability -- we are currently searching for ๐Ÿ’ธ compute sponsors to run more powerful models, faster, and optimize guided reasoning performance.)", "raw": "Try out Guided Reasoning with our light demo chatbot, powered by ๐Ÿค— HuggingFace's free Inference Api and small LLMs. (Sorry for poor latency and limited availability -- we are currently searching for ๐Ÿ’ธ compute sponsors to run more powerful models, faster, and optimize guided reasoning performance.)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Built on top of Logikon's open-source AI reasoning analytics.", "raw": "Built on top of Logikon's open-source AI reasoning analytics.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Demo chat app: ", "raw": "Demo chat app: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/logikon/benjamin-chat", "resource": { "type": "space", "id": "logikon/benjamin-chat", "discussionNum": null }, "url": "https://huggingface.co/spaces/logikon/benjamin-chat", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Github: ", "raw": "Github: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/logikon-ai/logikon", "resource": null, "url": null, "href": 
"https://github.com/logikon-ai/logikon", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Technical report: ", "raw": "Technical report: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2408.16331", "resource": null, "url": null, "href": "https://arxiv.org/abs/2408.16331", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โžก๏ธ Check it out and get involved! Looking forward to hearing from you.", "raw": "โžก๏ธ Check it out and get involved! Looking forward to hearing from you.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿงญ Guided Reasoning ๐Ÿ‘‹ Hi everyone, We've been releasing Guided Reasoning: Our AI guides walk your favorite LLM through complex reasoning problems. ๐ŸŽฏ Goals: 1๏ธโƒฃ Reliability. AIs consistently follow reasoning methods. 2๏ธโƒฃ Self-explainability. AIs see reasoning protocols and can explain internal deliberation. 3๏ธโƒฃ Contestability. Users may amend AI reasoning and revise plausibility assessments. Try out Guided Reasoning with our light demo chatbot, powered by ๐Ÿค— HuggingFace's free Inference API and small LLMs. (Sorry for the poor latency and limited availability -- we are currently searching for ๐Ÿ’ธ compute sponsors to run more powerful models, faster, and optimize guided reasoning performance.) Built on top of Logikon's open-source AI reasoning analytics. Demo chat app: https://huggingface.co/spaces/logikon/benjamin-chat GitHub: https://github.com/logikon-ai/logikon Technical report: https://arxiv.org/abs/2408.16331 โžก๏ธ Check it out and get involved! Looking forward to hearing from you.
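A minimal sketch of calling a small chat model on the free Serverless Inference API (the backend the demo relies on), using huggingface_hub's InferenceClient. The model id and the system prompt below are illustrative assumptions, not the actual Guided Reasoning protocol, which lives in the Logikon package linked above.

```python
# Hedged sketch: query a small instruct model via the free Serverless Inference API.
# The model id and prompt are assumptions for illustration only.
from huggingface_hub import InferenceClient

client = InferenceClient()  # uses the free serverless endpoint; pass token=... if needed

messages = [
    {"role": "system", "content": "Think through the problem step by step before answering."},
    {"role": "user", "content": "If all bloops are razzies and some razzies are lazzies, are all bloops lazzies?"},
]

response = client.chat_completion(
    messages=messages,
    model="HuggingFaceH4/zephyr-7b-beta",  # assumption: any small chat model served by the API
    max_tokens=256,
)
print(response.choices[0].message.content)
```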
{ "avatarUrl": "/avatars/78be882adf32b808686713e9b457797d.svg", "fullname": "Gregor Betz", "name": "ggbetz", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 4, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "andito", "reuank", "scacean" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-04T07:23:45.000Z
2024-09-04T07:26:21.336Z
[]
/posts/ggbetz/314529831042259
1,137
0
182312801833822
[ { "type": "text", "value": " Fine-tuned Phi-3.5 Chatbot", "raw": " Fine-tuned Phi-3.5 Chatbot", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "This project presents a fine-tuned version of Microsoft's Phi-3.5 model, optimized for enhanced conversational abilities and general knowledge tasks.", "raw": "This project presents a fine-tuned version of Microsoft's Phi-3.5 model, optimized for enhanced conversational abilities and general knowledge tasks.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Model Details", "raw": "Model Details", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Base model: microsoft/Phi-3.5-mini-instruct", "raw": "- Base model: microsoft/Phi-3.5-mini-instruct", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Fine-tuning method: PEFT (Parameter-Efficient Fine-Tuning)", "raw": "- Fine-tuning method: PEFT (Parameter-Efficient Fine-Tuning)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Training data: [Brief description of your dataset]", "raw": "- Training data: [Brief description of your dataset]", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " Features", "raw": " Features", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Improved response generation for a wide range of topics", "raw": "- Improved response generation for a wide range of topics", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": 
"new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Enhanced context understanding and coherence", "raw": "- Enhanced context understanding and coherence", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Optimized for deployment on Hugging Face Spaces", "raw": "- Optimized for deployment on Hugging Face Spaces", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Usage", "raw": "Usage", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "This model can be used for various natural language processing tasks, including:", "raw": "This model can be used for various natural language processing tasks, including:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- General conversation", "raw": "- General conversation", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Question answering", "raw": "- Question answering", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Task instructions", "raw": "- Task instructions", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Creative writing", "raw": "- Creative writing", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Try out the model here : ", "raw": "Try out the model here : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, 
"label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/sagar007/phi3.5_mini_instruct_finetune", "resource": { "type": "space", "id": "sagar007/phi3.5_mini_instruct_finetune", "discussionNum": null }, "url": "https://huggingface.co/spaces/sagar007/phi3.5_mini_instruct_finetune", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Limitations", "raw": "Limitations", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "While this fine-tuned model shows improved performance, users should be aware of potential biases and limitations inherent in language models. Always critically evaluate the model's outputs.", "raw": "While this fine-tuned model shows improved performance, users should be aware of potential biases and limitations inherent in language models. Always critically evaluate the model's outputs.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " Feedback", "raw": " Feedback", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I welcome any feedback, suggestions, or questions about this project. Feel free to open an issue or contribute to further improvements!", "raw": "I welcome any feedback, suggestions, or questions about this project. Feel free to open an issue or contribute to further improvements!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "#Phi35 #FineTuning #NLP #MachineLearning #HuggingFace", "raw": "#Phi35 #FineTuning #NLP #MachineLearning #HuggingFace", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Fine-tuned Phi-3.5 Chatbot This project presents a fine-tuned version of Microsoft's Phi-3.5 model, optimized for enhanced conversational abilities and general knowledge tasks. Model Details - Base model: microsoft/Phi-3.5-mini-instruct - Fine-tuning method: PEFT (Parameter-Efficient Fine-Tuning) - Training data: [Brief description of your dataset] Features - Improved response generation for a wide range of topics - Enhanced context understanding and coherence - Optimized for deployment on Hugging Face Spaces Usage This model can be used for various natural language processing tasks, including: - General conversation - Question answering - Task instructions - Creative writing Try out the model here: https://huggingface.co/spaces/sagar007/phi3.5_mini_instruct_finetune Limitations While this fine-tuned model shows improved performance, users should be aware of potential biases and limitations inherent in language models. Always critically evaluate the model's outputs. Feedback I welcome any feedback, suggestions, or questions about this project. Feel free to open an issue or contribute to further improvements! #Phi35 #FineTuning #NLP #MachineLearning #HuggingFace
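A rough sketch of what a PEFT/LoRA setup for Phi-3.5-mini-instruct can look like with the peft and transformers libraries. This is not the author's actual training script; the rank, target modules, and data pipeline are assumptions.

```python
# Hedged sketch of a PEFT/LoRA fine-tune of Phi-3.5-mini-instruct.
# Not the author's recipe: rank, target modules, and dataset handling are assumptions.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoraConfig, get_peft_model

base = "microsoft/Phi-3.5-mini-instruct"
tokenizer = AutoTokenizer.from_pretrained(base)   # needed later to tokenize your chat data
model = AutoModelForCausalLM.from_pretrained(base)

lora_cfg = LoraConfig(
    r=16,                                   # adapter rank (assumed)
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["qkv_proj", "o_proj"],  # assumed attention projections for Phi-3.5
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_cfg)
model.print_trainable_parameters()  # only the low-rank adapters are trainable
# ...then train with TRL's SFTTrainer or transformers' Trainer on your dataset.
```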
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a464cfe0de0c5c6d8b04a1/1gCs46R_bW9apQzLQUrn5.png", "fullname": "Sagar pallai", "name": "sagar007", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 8, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62a464cfe0de0c5c6d8b04a1/U6PEKRIi2Dk8PfHT5Syyd.webp" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-04T05:28:56.000Z
2024-09-05T05:40:08.424Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6569216f9c96f1a47bf45788/mCLqmAs4dOjKdxNQVAp1w.png", "fullname": "Sica Rius", "name": "SicariusSicariiStuff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 132, "isFollowing": false } ]
/posts/sagar007/182312801833822
461
1
329890206827914
[ { "type": "text", "value": "Hyperfast Contextual Custom LLM with Agents, Multitokens, Explainable AI, and Distillation ", "raw": "Hyperfast Contextual Custom LLM with Agents, Multitokens, Explainable AI, and Distillation ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://mltblog.com/4dNPSnB", "resource": null, "url": null, "href": "https://mltblog.com/4dNPSnB", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "New additions to this ground-breaking system include multi-token distillation when processing prompts, agents to meet user intent, more NLP, and a command prompt menu accepting both standard prompts and various actions.", "raw": "New additions to this ground-breaking system include multi-token distillation when processing prompts, agents to meet user intent, more NLP, and a command prompt menu accepting both standard prompts and various actions.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I also added several illustrations, featuring xLLM in action with a full session and sample commands to fine-tune in real-time. All the code, input sources (anonymized corporate corpus from fortune 100 company), contextual backend tables including embeddings, are on GitHub. My system has zero weight, no transformer, and no neural network. It relies on explainable AI, does not require training, is fully reproducible, and fits in memory. Yet your prompts can retrieve relevant full text entities from the corpus with no latency โ€” including URLs, categories, titles, email addresses, and so on โ€” thanks to well-designed architecture.", "raw": "I also added several illustrations, featuring xLLM in action with a full session and sample commands to fine-tune in real-time. All the code, input sources (anonymized corporate corpus from fortune 100 company), contextual backend tables including embeddings, are on GitHub. My system has zero weight, no transformer, and no neural network. It relies on explainable AI, does not require training, is fully reproducible, and fits in memory. 
Yet your prompts can retrieve relevant full text entities from the corpus with no latency โ€” including URLs, categories, titles, email addresses, and so on โ€” thanks to well-designed architecture.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Read more, get the code, paper and everything for free, at ", "raw": "Read more, get the code, paper and everything for free, at ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://mltblog.com/4dNPSnB", "resource": null, "url": null, "href": "https://mltblog.com/4dNPSnB", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Hyperfast Contextual Custom LLM with Agents, Multitokens, Explainable AI, and Distillation https://mltblog.com/4dNPSnB New additions to this ground-breaking system include multi-token distillation when processing prompts, agents to meet user intent, more NLP, and a command prompt menu accepting both standard prompts and various actions. I also added several illustrations, featuring xLLM in action with a full session and sample commands to fine-tune in real time. All the code, input sources (an anonymized corporate corpus from a Fortune 100 company), and contextual backend tables including embeddings are on GitHub. My system has zero weights, no transformer, and no neural network. It relies on explainable AI, does not require training, is fully reproducible, and fits in memory. Yet your prompts can retrieve relevant full-text entities from the corpus with no latency โ€” including URLs, categories, titles, email addresses, and so on โ€” thanks to a well-designed architecture.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/669c89e98f2dbc203f9e74ab/higvnXEHeo_Ig2bgTpn47.png", "fullname": "Vincent Granville", "name": "vincentg64", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 17, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/669c89e98f2dbc203f9e74ab/ZlwkNzh2GnMNGKVJASNfN.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "djuna", "Bruhn", "lilcheaty" ], "count": 4 }, { "reaction": "โค๏ธ", "users": [ "StephenGenusa" ], "count": 1 } ]
2024-09-03T16:49:54.000Z
2024-09-05T18:53:58.943Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662162fd296b3d40f15367a4/jM74dtHuAGI6UlLGT7A9s.jpeg", "fullname": "Stephen Genusa", "name": "StephenGenusa", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/669c89e98f2dbc203f9e74ab/higvnXEHeo_Ig2bgTpn47.png", "fullname": "Vincent Granville", "name": "vincentg64", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 17, "isFollowing": false } ]
/posts/vincentg64/329890206827914
1,448
2
407079979685500
[ { "type": "text", "value": "๐Ÿšจ ๐—›๐˜‚๐—บ๐—ฎ๐—ป ๐—™๐—ฒ๐—ฒ๐—ฑ๐—ฏ๐—ฎ๐—ฐ๐—ธ ๐—ณ๐—ผ๐—ฟ ๐—”๐—œ ๐˜๐—ฟ๐—ฎ๐—ถ๐—ป๐—ถ๐—ป๐—ด: ๐—ก๐—ผ๐˜ ๐˜๐—ต๐—ฒ ๐—ด๐—ผ๐—น๐—ฑ๐—ฒ๐—ป ๐—ด๐—ผ๐—ผ๐˜€๐—ฒ ๐˜„๐—ฒ ๐˜๐—ต๐—ผ๐˜‚๐—ด๐—ต๐˜?", "raw": "๐Ÿšจ ๐—›๐˜‚๐—บ๐—ฎ๐—ป ๐—™๐—ฒ๐—ฒ๐—ฑ๐—ฏ๐—ฎ๐—ฐ๐—ธ ๐—ณ๐—ผ๐—ฟ ๐—”๐—œ ๐˜๐—ฟ๐—ฎ๐—ถ๐—ป๐—ถ๐—ป๐—ด: ๐—ก๐—ผ๐˜ ๐˜๐—ต๐—ฒ ๐—ด๐—ผ๐—น๐—ฑ๐—ฒ๐—ป ๐—ด๐—ผ๐—ผ๐˜€๐—ฒ ๐˜„๐—ฒ ๐˜๐—ต๐—ผ๐˜‚๐—ด๐—ต๐˜?", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Iโ€™ve just read a great paper where Cohere researchers raises significant questions about using Human feedback to evaluate AI language models.", "raw": "Iโ€™ve just read a great paper where Cohere researchers raises significant questions about using Human feedback to evaluate AI language models.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Human feedback is often regarded as the gold standard for judging AI performance, but it turns out, it might be more like fool's gold : the study reveals that our human judgments are easily swayed by factors that have nothing to do with actual AI performance.", "raw": "Human feedback is often regarded as the gold standard for judging AI performance, but it turns out, it might be more like fool's gold : the study reveals that our human judgments are easily swayed by factors that have nothing to do with actual AI performance.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐—ž๐—ฒ๐˜† ๐—ถ๐—ป๐˜€๐—ถ๐—ด๐—ต๐˜๐˜€:", "raw": "๐—ž๐—ฒ๐˜† ๐—ถ๐—ป๐˜€๐—ถ๐—ด๐—ต๐˜๐˜€:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿง  Test several models: Llama-2, Falcon-40B, Cohere Command 6 and 52B ๐Ÿ™…โ€โ™‚๏ธ Refusing to answer tanks AI ratings more than getting facts wrong. We apparently prefer a wrong answer to no answer!", "raw": "๐Ÿง  Test several models: Llama-2, Falcon-40B, Cohere Command 6 and 52B ๐Ÿ™…โ€โ™‚๏ธ Refusing to answer tanks AI ratings more than getting facts wrong. 
We apparently prefer a wrong answer to no answer!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ’ช Confidence is key (even when it shouldn't be): More assertive AI responses are seen as more factual, even when they're not. This could be pushing AI development in the wrong direction, with systems like RLHF.", "raw": "๐Ÿ’ช Confidence is key (even when it shouldn't be): More assertive AI responses are seen as more factual, even when they're not. This could be pushing AI development in the wrong direction, with systems like RLHF.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸŽญ The assertiveness trap: As AI responses get more confident-sounding, non-expert annotators become less likely to notice when they're wrong or inconsistent.", "raw": "๐ŸŽญ The assertiveness trap: As AI responses get more confident-sounding, non-expert annotators become less likely to notice when they're wrong or inconsistent.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "And a consequence of the above:", "raw": "And a consequence of the above:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ”„ ๐—ฅ๐—Ÿ๐—›๐—™ ๐—บ๐—ถ๐—ด๐—ต๐˜ ๐—ฏ๐—ฎ๐—ฐ๐—ธ๐—ณ๐—ถ๐—ฟ๐—ฒ: Using human feedback to train AI (Reinforcement Learning from Human Feedback) could accidentally make AI more overconfident and less accurate.", "raw": "๐Ÿ”„ ๐—ฅ๐—Ÿ๐—›๐—™ ๐—บ๐—ถ๐—ด๐—ต๐˜ ๐—ฏ๐—ฎ๐—ฐ๐—ธ๐—ณ๐—ถ๐—ฟ๐—ฒ: Using human feedback to train AI (Reinforcement Learning from Human Feedback) could accidentally make AI more overconfident and less accurate.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "This paper means we need to think carefully about how we evaluate and train AI systems to ensure we're rewarding correctness over apparences of it like 
confident talk.", "raw": "This paper means we need to think carefully about how we evaluate and train AI systems to ensure we're rewarding correctness over apparences of it like confident talk.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โ›”๏ธ Chatbot Arenaโ€™s ELO leaderboard, based on crowdsourced answers from average joes like you and me, might become completely irrelevant as models will become smarter and smarter.", "raw": "โ›”๏ธ Chatbot Arenaโ€™s ELO leaderboard, based on crowdsourced answers from average joes like you and me, might become completely irrelevant as models will become smarter and smarter.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Read the paper ๐Ÿ‘‰ ", "raw": "Read the paper ๐Ÿ‘‰ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2309.16349", "resource": { "type": "paper", "id": "2309.16349", "discussionNum": null }, "url": "https://huggingface.co/papers/2309.16349", "href": null, "user": null, "lang": null, "code": null, "label": "Human Feedback is not Gold Standard (2309.16349)" } ]
๐Ÿšจ ๐—›๐˜‚๐—บ๐—ฎ๐—ป ๐—™๐—ฒ๐—ฒ๐—ฑ๐—ฏ๐—ฎ๐—ฐ๐—ธ ๐—ณ๐—ผ๐—ฟ ๐—”๐—œ ๐˜๐—ฟ๐—ฎ๐—ถ๐—ป๐—ถ๐—ป๐—ด: ๐—ก๐—ผ๐˜ ๐˜๐—ต๐—ฒ ๐—ด๐—ผ๐—น๐—ฑ๐—ฒ๐—ป ๐—ด๐—ผ๐—ผ๐˜€๐—ฒ ๐˜„๐—ฒ ๐˜๐—ต๐—ผ๐˜‚๐—ด๐—ต๐˜? Iโ€™ve just read a great paper where Cohere researchers raises significant questions about using Human feedback to evaluate AI language models. Human feedback is often regarded as the gold standard for judging AI performance, but it turns out, it might be more like fool's gold : the study reveals that our human judgments are easily swayed by factors that have nothing to do with actual AI performance. ๐—ž๐—ฒ๐˜† ๐—ถ๐—ป๐˜€๐—ถ๐—ด๐—ต๐˜๐˜€: ๐Ÿง  Test several models: Llama-2, Falcon-40B, Cohere Command 6 and 52B ๐Ÿ™…โ€โ™‚๏ธ Refusing to answer tanks AI ratings more than getting facts wrong. We apparently prefer a wrong answer to no answer! ๐Ÿ’ช Confidence is key (even when it shouldn't be): More assertive AI responses are seen as more factual, even when they're not. This could be pushing AI development in the wrong direction, with systems like RLHF. ๐ŸŽญ The assertiveness trap: As AI responses get more confident-sounding, non-expert annotators become less likely to notice when they're wrong or inconsistent. And a consequence of the above: ๐Ÿ”„ ๐—ฅ๐—Ÿ๐—›๐—™ ๐—บ๐—ถ๐—ด๐—ต๐˜ ๐—ฏ๐—ฎ๐—ฐ๐—ธ๐—ณ๐—ถ๐—ฟ๐—ฒ: Using human feedback to train AI (Reinforcement Learning from Human Feedback) could accidentally make AI more overconfident and less accurate. This paper means we need to think carefully about how we evaluate and train AI systems to ensure we're rewarding correctness over apparences of it like confident talk. โ›”๏ธ Chatbot Arenaโ€™s ELO leaderboard, based on crowdsourced answers from average joes like you and me, might become completely irrelevant as models will become smarter and smarter. Read the paper ๐Ÿ‘‰ https://huggingface.co/papers/2309.16349
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 476, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/ZeAJhy5RG9F0knqMsqwee.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "louisbrulenaudet" ], "count": 2 } ]
2024-09-03T14:45:11.000Z
2024-09-03T14:45:11.422Z
[]
/posts/m-ric/407079979685500
808
0
440844864868620
[ { "type": "text", "value": "Is AIโ€™s impact on elections being overblown? Three researchers think so in this opinion piece published in the MIT Tech Review.", "raw": "Is AIโ€™s impact on elections being overblown? Three researchers think so in this opinion piece published in the MIT Tech Review.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Highlights:", "raw": "Highlights:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โ€ข\tโ€œAI is being used to try to influence electoral processes, but these efforts have not been fruitful.โ€", "raw": "โ€ข\tโ€œAI is being used to try to influence electoral processes, but these efforts have not been fruitful.โ€", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โ€ข\tโ€œWhy were these initial speculations about AI-enabled electoral interference so off (โ€ฆ) ? The short answer: Because they ignored decades of research on the limited influence of mass persuasion campaigns, the complex determinants of voting behaviors, and the indirect and human-mediated causal role of technology.โ€", "raw": "โ€ข\tโ€œWhy were these initial speculations about AI-enabled electoral interference so off (โ€ฆ) ? 
The short answer: Because they ignored decades of research on the limited influence of mass persuasion campaigns, the complex determinants of voting behaviors, and the indirect and human-mediated causal role of technology.โ€", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โ€ข\tโ€œYet we should remember that thereโ€™s a cost to overreaction based on ill-founded assumptions, especially when other critical issues go unaddressed.โ€", "raw": "โ€ข\tโ€œYet we should remember that thereโ€™s a cost to overreaction based on ill-founded assumptions, especially when other critical issues go unaddressed.โ€", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ‘‰Read more here: ", "raw": "๐Ÿ‘‰Read more here: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://technologyreview.com/2024/09/03/1103464/ai-impact-elections-overblown/", "resource": null, "url": null, "href": "https://technologyreview.com/2024/09/03/1103464/ai-impact-elections-overblown/", "user": null, "lang": null, "code": null, "label": null } ]
Is AIโ€™s impact on elections being overblown? Three researchers think so in this opinion piece published in the MIT Tech Review. Highlights: โ€ข โ€œAI is being used to try to influence electoral processes, but these efforts have not been fruitful.โ€ โ€ข โ€œWhy were these initial speculations about AI-enabled electoral interference so off (โ€ฆ) ? The short answer: Because they ignored decades of research on the limited influence of mass persuasion campaigns, the complex determinants of voting behaviors, and the indirect and human-mediated causal role of technology.โ€ โ€ข โ€œYet we should remember that thereโ€™s a cost to overreaction based on ill-founded assumptions, especially when other critical issues go unaddressed.โ€ ๐Ÿ‘‰Read more here: https://technologyreview.com/2024/09/03/1103464/ai-impact-elections-overblown/
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 364, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "BrigitteTousi", "jsulz", "alielfilali01" ], "count": 4 }, { "reaction": "๐Ÿง ", "users": [ "alielfilali01", "louisbrulenaudet" ], "count": 2 } ]
2024-09-03T13:44:50.000Z
2024-09-03T13:44:50.385Z
[]
/posts/fdaudens/440844864868620
1,537
0
513925031707884
[ { "type": "text", "value": "The Forward-Forward Algorithm๐Ÿค–", "raw": "The Forward-Forward Algorithm๐Ÿค–", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "FFA replaces the forward and backward passes in backpropagtion with two forward passes - one with positive (real) data and another with negative data. Each layer has its objective function - to increase or decrease a โ€œgoodness\" metric. The positive pass uses real data and adjusts weights to increase โ€œgoodnessโ€ in every hidden layer. The negative pass does the opposite. ", "raw": "FFA replaces the forward and backward passes in backpropagtion with two forward passes - one with positive (real) data and another with negative data. Each layer has its objective function - to increase or decrease a โ€œgoodness\" metric. The positive pass uses real data and adjusts weights to increase โ€œgoodnessโ€ in every hidden layer. The negative pass does the opposite. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I must say reading&Implementing a godfather paper feels quite fulfilling:)", "raw": "I must say reading&Implementing a godfather paper feels quite fulfilling:)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Thank you Prof. Geoffrey Hinton.", "raw": "Thank you Prof. Geoffrey Hinton.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Code: ", "raw": "Code: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/Jaykef/ai-algorithms/blob/main/mnist_the_forward_forward_algorithm.ipynb", "resource": null, "url": null, "href": "https://github.com/Jaykef/ai-algorithms/blob/main/mnist_the_forward_forward_algorithm.ipynb", "user": null, "lang": null, "code": null, "label": null } ]
The Forward-Forward Algorithm ๐Ÿค– FFA replaces the forward and backward passes in backpropagation with two forward passes - one with positive (real) data and another with negative data. Each layer has its own objective function - to increase or decrease a โ€œgoodnessโ€ metric. The positive pass uses real data and adjusts weights to increase โ€œgoodnessโ€ in every hidden layer. The negative pass does the opposite. I must say, reading & implementing a paper by one of the godfathers of AI feels quite fulfilling :) Thank you Prof. Geoffrey Hinton. Code: https://github.com/Jaykef/ai-algorithms/blob/main/mnist_the_forward_forward_algorithm.ipynb
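A minimal sketch of a single Forward-Forward layer in PyTorch, assuming the common formulation from the paper: goodness is the sum of squared activations, and each layer is trained locally to push goodness above a threshold on positive data and below it on negative data. The threshold, learning rate, and layer shapes are illustrative.

```python
# Minimal Forward-Forward layer sketch (PyTorch). Assumptions: goodness = sum of squared
# activations, logistic loss around a threshold, per-layer local optimizer.
import torch
import torch.nn as nn


class FFLayer(nn.Module):
    def __init__(self, in_dim, out_dim, threshold=2.0, lr=0.03):
        super().__init__()
        self.linear = nn.Linear(in_dim, out_dim)
        self.act = nn.ReLU()
        self.threshold = threshold
        self.opt = torch.optim.Adam(self.parameters(), lr=lr)

    def forward(self, x):
        # Normalize so only the direction (not the previous layer's goodness) is passed on.
        x = x / (x.norm(dim=1, keepdim=True) + 1e-4)
        return self.act(self.linear(x))

    def train_step(self, x_pos, x_neg):
        # One local update: raise goodness on positive data, lower it on negative data.
        g_pos = self.forward(x_pos).pow(2).sum(dim=1)
        g_neg = self.forward(x_neg).pow(2).sum(dim=1)
        loss = torch.log(1 + torch.exp(torch.cat([
            self.threshold - g_pos,   # positive samples should exceed the threshold
            g_neg - self.threshold,   # negative samples should fall below it
        ]))).mean()
        self.opt.zero_grad()
        loss.backward()
        self.opt.step()
        # Detach so the next layer trains on this output without backprop through this layer.
        return self.forward(x_pos).detach(), self.forward(x_neg).detach()
```

Stacking a few of these layers and feeding each one the detached output of the previous layer reproduces the layer-local training loop described above.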
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg", "fullname": "Jaward Sesay", "name": "Jaward", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 189, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/Fm7L4314h2q8rzjOpRQJJ.mp4" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/cNFytChGQoSCw4z7B0x80.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/cxgrGBBpgO1cKLOzo7yQp.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/RMDyDc7_RhfW9yeH4MJJD.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 }, { "reaction": "๐Ÿ‘", "users": [ "GigaBoy" ], "count": 1 } ]
2024-09-03T11:34:26.000Z
2024-09-03T11:34:26.783Z
[]
/posts/Jaward/513925031707884
550
0
865363319225333
[ { "type": "text", "value": "๐Œ๐ฒ ๐Ÿ๐ข๐ซ๐ฌ๐ญ ๐œ๐จ๐ฆ๐ฆ๐ฎ๐ง๐ข๐ญ๐ฒ ๐š๐ซ๐ญ๐ข๐œ๐ฅ๐ž! ๐’๐ž๐ฅ๐ž๐œ๐ญ๐ข๐ฏ๐ž ๐Ÿ๐ข๐ง๐ž-๐ญ๐ฎ๐ง๐ข๐ง๐  ๐ฐ๐ข๐ญ๐ก ๐’๐ฉ๐ž๐œ๐ญ๐ซ๐ฎ๐ฆ ๐ŸŽฏ ", "raw": "๐Œ๐ฒ ๐Ÿ๐ข๐ซ๐ฌ๐ญ ๐œ๐จ๐ฆ๐ฆ๐ฎ๐ง๐ข๐ญ๐ฒ ๐š๐ซ๐ญ๐ข๐œ๐ฅ๐ž! ๐’๐ž๐ฅ๐ž๐œ๐ญ๐ข๐ฏ๐ž ๐Ÿ๐ข๐ง๐ž-๐ญ๐ฎ๐ง๐ข๐ง๐  ๐ฐ๐ข๐ญ๐ก ๐’๐ฉ๐ž๐œ๐ญ๐ซ๐ฎ๐ฆ ๐ŸŽฏ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Full walkthrough on how to get started with Spectrum and TRL for efficient fine-tuning.", "raw": "Full walkthrough on how to get started with Spectrum and TRL for efficient fine-tuning.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ“” ๐Ÿ‘ฃ ", "raw": "๐Ÿ“” ๐Ÿ‘ฃ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/anakin87/spectrum", "resource": null, "url": null, "href": "https://huggingface.co/blog/anakin87/spectrum", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "---", "raw": "---", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Looking to fine-tune Language Models efficiently and save on computational resources?", "raw": "Looking to fine-tune Language Models efficiently and save on computational resources?", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "One popular method is QLoRa, which quantizes the original model and trains low-rank adapters on top.", "raw": "One popular method is QLoRa, which quantizes the original model and trains low-rank adapters on top.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", 
"value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "It's quite effective and uses less GPU than full fine-tuning.", "raw": "It's quite effective and uses less GPU than full fine-tuning.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "However, QLoRa applies Low-Rank Adaptation uniformly across the entire model.", "raw": "However, QLoRa applies Low-Rank Adaptation uniformly across the entire model.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "What if we could identify the most informative layers and only fine-tune those? ๐Ÿค”", "raw": "What if we could identify the most informative layers and only fine-tune those? ๐Ÿค”", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "This is exactly what Spectrum does! ๐Ÿ‘‡", "raw": "This is exactly what Spectrum does! 
๐Ÿ‘‡", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ”ฌ Spectrum analyzes the weight matrices for all layers in a Language Model and calculates a Signal to Noise Ratio (SNR) for each one.", "raw": "๐Ÿ”ฌ Spectrum analyzes the weight matrices for all layers in a Language Model and calculates a Signal to Noise Ratio (SNR) for each one.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "(It uses Random Matrix Theory and Marchenko-Pastur distribution to distinguish signal from noise.)", "raw": "(It uses Random Matrix Theory and Marchenko-Pastur distribution to distinguish signal from noise.)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸŽฏ Based on a chosen percentage (say, 25%), Spectrum selects the most informative layers of each type (mlp.down_proj, self_attn.o_proj, etc.).", "raw": "๐ŸŽฏ Based on a chosen percentage (say, 25%), Spectrum selects the most informative layers of each type (mlp.down_proj, self_attn.o_proj, etc.).", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "You can then โ„๏ธ freeze the rest of the model and focus your ๐Ÿ‹๏ธโ€โ™‚๏ธ training on the chosen layers.", "raw": "You can then โ„๏ธ freeze the rest of the model and focus your ๐Ÿ‹๏ธโ€โ™‚๏ธ training on the chosen layers.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ† Results/Evaluation", "raw": "๐Ÿ† Results/Evaluation", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": 
null }, { "type": "text", "value": "- Spectrum is competitive with full fine-tuning and beats QLoRA on benchmarks.", "raw": "- Spectrum is competitive with full fine-tuning and beats QLoRA on benchmarks.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- While QLoRA is more memory-efficient on a single GPU, Spectrum shines in distributed training setups.", "raw": "- While QLoRA is more memory-efficient on a single GPU, Spectrum shines in distributed training setups.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Great models trained with Spectrum: Dolphin models, Llama 3.1 Storm, numerous models by VAGO Solutions...", "raw": "- Great models trained with Spectrum: Dolphin models, Llama 3.1 Storm, numerous models by VAGO Solutions...", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "---", "raw": "---", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "For a practical guide, check out the article above.", "raw": "For a practical guide, check out the article above.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐Œ๐ฒ ๐Ÿ๐ข๐ซ๐ฌ๐ญ ๐œ๐จ๐ฆ๐ฆ๐ฎ๐ง๐ข๐ญ๐ฒ ๐š๐ซ๐ญ๐ข๐œ๐ฅ๐ž! ๐’๐ž๐ฅ๐ž๐œ๐ญ๐ข๐ฏ๐ž ๐Ÿ๐ข๐ง๐ž-๐ญ๐ฎ๐ง๐ข๐ง๐  ๐ฐ๐ข๐ญ๐ก ๐’๐ฉ๐ž๐œ๐ญ๐ซ๐ฎ๐ฆ ๐ŸŽฏ Full walkthrough on how to get started with Spectrum and TRL for efficient fine-tuning. ๐Ÿ“” ๐Ÿ‘ฃ https://huggingface.co/blog/anakin87/spectrum --- Looking to fine-tune Language Models efficiently and save on computational resources? One popular method is QLoRa, which quantizes the original model and trains low-rank adapters on top. It's quite effective and uses less GPU than full fine-tuning. However, QLoRa applies Low-Rank Adaptation uniformly across the entire model. What if we could identify the most informative layers and only fine-tune those? ๐Ÿค” This is exactly what Spectrum does! ๐Ÿ‘‡ ๐Ÿ”ฌ Spectrum analyzes the weight matrices for all layers in a Language Model and calculates a Signal to Noise Ratio (SNR) for each one. (It uses Random Matrix Theory and Marchenko-Pastur distribution to distinguish signal from noise.) ๐ŸŽฏ Based on a chosen percentage (say, 25%), Spectrum selects the most informative layers of each type (mlp.down_proj, self_attn.o_proj, etc.). You can then โ„๏ธ freeze the rest of the model and focus your ๐Ÿ‹๏ธโ€โ™‚๏ธ training on the chosen layers. ๐Ÿ† Results/Evaluation - Spectrum is competitive with full fine-tuning and beats QLoRA on benchmarks. - While QLoRA is more memory-efficient on a single GPU, Spectrum shines in distributed training setups. - Great models trained with Spectrum: Dolphin models, Llama 3.1 Storm, numerous models by VAGO Solutions... --- For a practical guide, check out the article above.
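For a concrete picture of the freezing step, here is a minimal PyTorch/transformers sketch. It is not the official Spectrum tooling: the checkpoint id and the layer-name patterns below are placeholders standing in for whatever layers a real Spectrum SNR scan would select.

```python
# Minimal sketch of Spectrum-style selective fine-tuning: freeze everything
# except the layers picked by an SNR analysis. The checkpoint and the pattern
# list are illustrative placeholders, not Spectrum's real output format.
import re
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/SmolLM-135M")  # any causal LM works

# Hypothetical "most informative" layers chosen by the scan
unfrozen_patterns = [
    r"layers\.(0|3|7|12)\.mlp\.down_proj",
    r"layers\.(1|5|9|14)\.self_attn\.o_proj",
]

for name, param in model.named_parameters():
    param.requires_grad = any(re.search(p, name) for p in unfrozen_patterns)

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable: {trainable:,} / {total:,} ({100 * trainable / total:.1f}%)")
```

You can then hand the model to TRL's SFTTrainer as usual; only the unfrozen layers receive gradient updates.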
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626505d493e0b04d75710566/9rfJc9ORXU9J5a42Ev3v6.png", "fullname": "Stefano Fiorucci", "name": "anakin87", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 66, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/626505d493e0b04d75710566/fVCMAKAU5KCYhbzCL_qBg.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "osanseviero" ], "count": 2 }, { "reaction": "๐Ÿš€", "users": [ "gsarti" ], "count": 1 } ]
2024-09-03T10:00:04.000Z
2024-09-05T08:17:30.967Z
[]
/posts/anakin87/865363319225333
1,080
1
578032749040253
[ { "type": "text", "value": "๐๐ž๐ฐ ๐‘๐ž๐ฅ๐ž๐š๐ฌ๐ž: ๐Œ๐š๐ฃ๐จ๐ซ ๐“๐Ž๐Œ ๐ƒ๐ข๐ ๐ข๐ญ๐š๐ฅ ๐„๐ฅ๐ž๐ฏ๐š๐ญ๐ข๐จ๐ง ๐Œ๐จ๐๐ž๐ฅ ๐„๐ฑ๐ฉ๐š๐ง๐ฌ๐ข๐จ๐ง ๐Ÿ—บ๏ธ", "raw": "๐๐ž๐ฐ ๐‘๐ž๐ฅ๐ž๐š๐ฌ๐ž: ๐Œ๐š๐ฃ๐จ๐ซ ๐“๐Ž๐Œ ๐ƒ๐ข๐ ๐ข๐ญ๐š๐ฅ ๐„๐ฅ๐ž๐ฏ๐š๐ญ๐ข๐จ๐ง ๐Œ๐จ๐๐ž๐ฅ ๐„๐ฑ๐ฉ๐š๐ง๐ฌ๐ข๐จ๐ง ๐Ÿ—บ๏ธ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Dataset: ", "raw": "Dataset: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/Major-TOM/Core-DEM", "resource": { "type": "dataset", "id": "Major-TOM/Core-DEM", "discussionNum": null }, "url": "https://huggingface.co/datasets/Major-TOM/Core-DEM", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Today with European Space Agency - ESA and Adobe Research, we release a global expansion to Major TOM with GLO-30 DEM data.", "raw": "Today with European Space Agency - ESA and Adobe Research, we release a global expansion to Major TOM with GLO-30 DEM data.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "You can now instantly access nearly 2M of Major TOM samples with elevation data to build your next AI model for EO. ๐ŸŒ ", "raw": "You can now instantly access nearly 2M of Major TOM samples with elevation data to build your next AI model for EO. 
๐ŸŒ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ” Browse the data in our usual viewer app: ", "raw": "๐Ÿ” Browse the data in our usual viewer app: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Major-TOM/MajorTOM-Core-Viewer", "resource": { "type": "space", "id": "Major-TOM/MajorTOM-Core-Viewer", "discussionNum": null }, "url": "https://huggingface.co/spaces/Major-TOM/MajorTOM-Core-Viewer", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Fantastic work championed by Paul Borne--Pons ", "raw": "Fantastic work championed by Paul Borne--Pons ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@NewtNewt", "resource": null, "url": null, "href": null, "user": "NewtNewt", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ๐Ÿš€", "raw": " ๐Ÿš€", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐๐ž๐ฐ ๐‘๐ž๐ฅ๐ž๐š๐ฌ๐ž: ๐Œ๐š๐ฃ๐จ๐ซ ๐“๐Ž๐Œ ๐ƒ๐ข๐ ๐ข๐ญ๐š๐ฅ ๐„๐ฅ๐ž๐ฏ๐š๐ญ๐ข๐จ๐ง ๐Œ๐จ๐๐ž๐ฅ ๐„๐ฑ๐ฉ๐š๐ง๐ฌ๐ข๐จ๐ง ๐Ÿ—บ๏ธ Dataset: https://huggingface.co/datasets/Major-TOM/Core-DEM Today with European Space Agency - ESA and Adobe Research, we release a global expansion to Major TOM with GLO-30 DEM data. You can now instantly access nearly 2M of Major TOM samples with elevation data to build your next AI model for EO. ๐ŸŒ ๐Ÿ” Browse the data in our usual viewer app: https://huggingface.co/spaces/Major-TOM/MajorTOM-Core-Viewer Fantastic work championed by Paul Borne--Pons @NewtNewt ๐Ÿš€
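If you prefer code over the viewer, a quick way to peek at the data is streaming it with the datasets library. This is only a sketch: the split name and the exact column layout are assumptions, so check the dataset card for the real schema.

```python
# Sketch: stream a few Core-DEM samples without downloading the full archive.
# The split name ("train") and the column names are assumptions -- see the dataset card.
from datasets import load_dataset

ds = load_dataset("Major-TOM/Core-DEM", split="train", streaming=True)

for i, sample in enumerate(ds):
    print(sample.keys())  # inspect the available fields (grid cell id, DEM payload, ...)
    if i == 2:
        break
```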
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1678741407493-6304c06eeb6d777a838eab63.png", "fullname": "Mikolaj Czerkawski", "name": "mikonvergence", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 25, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6304c06eeb6d777a838eab63/7BtZPtS--GFa_2rLuTxFN.mp4" } ]
[ { "avatarUrl": "/avatars/83cf39dd0f5895e7d7e6ae5a80b47deb.svg", "fullname": "PaulBP", "name": "NewtNewt", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 } ]
[ { "reaction": "โค๏ธ", "users": [ "Tonic", "AtAndDev", "NewtNewt", "osanseviero", "Obenlia", "robmarkcole" ], "count": 6 }, { "reaction": "๐Ÿง ", "users": [ "Tonic", "AtAndDev", "bmorphism", "osanseviero" ], "count": 4 }, { "reaction": "๐Ÿš€", "users": [ "Tonic", "AtAndDev", "John6666", "osanseviero" ], "count": 4 }, { "reaction": "๐Ÿค", "users": [ "Tonic", "pduf", "AtAndDev" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "Tonic", "AtAndDev" ], "count": 2 }, { "reaction": "๐Ÿค—", "users": [ "Tonic", "AtAndDev" ], "count": 2 }, { "reaction": "๐Ÿ˜Ž", "users": [ "Tonic", "AtAndDev" ], "count": 2 } ]
2024-09-03T08:05:29.000Z
2024-09-03T08:05:29.710Z
[]
/posts/mikonvergence/578032749040253
2,196
0
622098077829042
[ { "type": "text", "value": "# Excited to Share: New LLM Tokenization - Convert Text to tokens and vice versa! ๐Ÿš€", "raw": "# Excited to Share: New LLM Tokenization - Convert Text to tokens and vice versa! ๐Ÿš€", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I've just developed a powerful tool for anyone working with Language Models (LLMs) or diving into Natural Language Processing (NLP). ", "raw": "I've just developed a powerful tool for anyone working with Language Models (LLMs) or diving into Natural Language Processing (NLP). ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ” Introducing the LLM Tokenization - Convert Text to tokens and vice versa!!", "raw": "๐Ÿ” Introducing the LLM Tokenization - Convert Text to tokens and vice versa!!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Key Features:", "raw": "Key Features:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Convert text to tokens and token IDs", "raw": "- Convert text to tokens and token IDs", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Reverse engineer: convert token IDs back to text", "raw": "- Reverse engineer: convert token IDs back to text", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Support for popular models: LLama3 (Will add more models iteratively)", "raw": "- Support for popular models: LLama3 (Will add more models iteratively)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- User-friendly Gradio 
interface for easy interaction", "raw": "- User-friendly Gradio interface for easy interaction", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Whether you're debugging your NLP pipeline, exploring how different models tokenize text, or just curious about the inner workings of LLMs, this tool is for you!", "raw": "Whether you're debugging your NLP pipeline, exploring how different models tokenize text, or just curious about the inner workings of LLMs, this tool is for you!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ‘ฉโ€๐Ÿ’ป Tech Stack:", "raw": "๐Ÿ‘ฉโ€๐Ÿ’ป Tech Stack:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Python", "raw": "- Python", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Gradio for the web interface", "raw": "- Gradio for the web interface", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Hugging Face Transformers for tokenization", "raw": "- Hugging Face Transformers for tokenization", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "The application is deployed in Hugging Face spaces as Gradio application", "raw": "The application is deployed in Hugging Face spaces as Gradio application", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ”— Try it out: ", "raw": "๐Ÿ”— Try it out: ", "resource": null, "url": null, 
"href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://lnkd.in/g6R5z9k2", "resource": null, "url": null, "href": "https://lnkd.in/g6R5z9k2", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "#NLP #MachineLearning #AI #PythonDevelopment #OpenSource", "raw": "#NLP #MachineLearning #AI #PythonDevelopment #OpenSource", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
# Excited to Share: New LLM Tokenization Tool - Convert Text to Tokens and Vice Versa! ๐Ÿš€ I've just developed a powerful tool for anyone working with Language Models (LLMs) or diving into Natural Language Processing (NLP). ๐Ÿ” Introducing the LLM Tokenization tool - convert text to tokens and vice versa! Key Features: - Convert text to tokens and token IDs - Reverse engineer: convert token IDs back to text - Support for popular models: Llama 3 (will add more models iteratively) - User-friendly Gradio interface for easy interaction Whether you're debugging your NLP pipeline, exploring how different models tokenize text, or just curious about the inner workings of LLMs, this tool is for you! ๐Ÿ‘ฉโ€๐Ÿ’ป Tech Stack: - Python - Gradio for the web interface - Hugging Face Transformers for tokenization The application is deployed to Hugging Face Spaces as a Gradio application. ๐Ÿ”— Try it out: https://lnkd.in/g6R5z9k2 #NLP #MachineLearning #AI #PythonDevelopment #OpenSource
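The round trip the app performs boils down to a few tokenizer calls. A minimal sketch with Hugging Face Transformers is below; it uses the ungated GPT-2 tokenizer purely for illustration, since the Llama 3 tokenizer is gated.

```python
# Sketch of the app's core logic: text -> tokens -> token IDs -> text.
# GPT-2's tokenizer is used here only because it is ungated; swap in the
# Llama 3 tokenizer if you have access to it.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")

text = "Tokenization turns text into model-readable IDs."
tokens = tok.tokenize(text)   # subword strings
ids = tok.encode(text)        # integer token IDs
roundtrip = tok.decode(ids)   # back to text

print(tokens)
print(ids)
print(roundtrip)
```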
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f6ddf835e78cc6b0ed31e5d/Lf6aTuebYrSBXEDE4q4to.jpeg", "fullname": "Prasanna Kumar V", "name": "vpkprasanna", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-03T06:19:33.000Z
2024-09-03T06:20:17.275Z
[]
/posts/vpkprasanna/622098077829042
493
0
281598302766823
[ { "type": "text", "value": "I started training a public LoRA style (2 seperate training each on 4x A6000).", "raw": "I started training a public LoRA style (2 seperate training each on 4x A6000).", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Experimenting captions vs non-captions. So we will see which yields best results for style training on FLUX.", "raw": "Experimenting captions vs non-captions. So we will see which yields best results for style training on FLUX.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Generated captions with multi-GPU batch Joycaption app.", "raw": "Generated captions with multi-GPU batch Joycaption app.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I am showing 5 examples of what Joycaption generates on FLUX dev. Left images are the original style images from the dataset.", "raw": "I am showing 5 examples of what Joycaption generates on FLUX dev. 
Left images are the original style images from the dataset.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I used my multi-GPU Joycaption APP (used 8x A6000 for ultra fast captioning) : ", "raw": "I used my multi-GPU Joycaption APP (used 8x A6000 for ultra fast captioning) : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/110613301", "resource": null, "url": null, "href": "https://www.patreon.com/posts/110613301", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I used my Gradio batch caption editor to edit some words and add activation token as ohwx 3d render : ", "raw": "I used my Gradio batch caption editor to edit some words and add activation token as ohwx 3d render : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/108992085", "resource": null, "url": null, "href": "https://www.patreon.com/posts/108992085", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "The no caption dataset uses only ohwx 3d render as caption", "raw": "The no caption dataset uses only ohwx 3d render as caption", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I am using my newest 4x_GPU_Rank_1_SLOW_Better_Quality.json on 4X A6000 GPU and train 500 epochs โ€” 114 images : ", "raw": "I am using my newest 4x_GPU_Rank_1_SLOW_Better_Quality.json on 4X A6000 GPU and train 500 epochs โ€” 114 images : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/110879657", "resource": null, "url": null, "href": "https://www.patreon.com/posts/110879657", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, 
"raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Total step count is being 500 * 114 / 4 (4x GPU โ€” batch size 1) = 14250", "raw": "Total step count is being 500 * 114 / 4 (4x GPU โ€” batch size 1) = 14250", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Taking 37 hours currently if I donโ€™t terminate early", "raw": "Taking 37 hours currently if I donโ€™t terminate early", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Will save a checkpoint once every 25 epochs", "raw": "Will save a checkpoint once every 25 epochs", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Full Windows Kohya LoRA training tutorial : ", "raw": "Full Windows Kohya LoRA training tutorial : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://youtu.be/nySGu12Y05k", "resource": null, "url": null, "href": "https://youtu.be/nySGu12Y05k", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Full cloud tutorial I am still editing", "raw": "Full cloud tutorial I am still editing", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Hopefully will share trained LoRA on Hugging Face and CivitAI along with full dataset including captions.", "raw": "Hopefully will share trained LoRA on Hugging Face and CivitAI along with full dataset including captions.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": 
null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I got permission to share dataset but canโ€™t be used commercially.", "raw": "I got permission to share dataset but canโ€™t be used commercially.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Also I will hopefully share full workflow in the CivitAI and Hugging Face LoRA pages.", "raw": "Also I will hopefully share full workflow in the CivitAI and Hugging Face LoRA pages.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
I started training a public LoRA style (2 separate trainings, each on 4x A6000). I am experimenting with captions vs. no captions, so we will see which yields the best results for style training on FLUX. Captions were generated with the multi-GPU batch Joycaption app. I am showing 5 examples of what Joycaption generates on FLUX dev. The left images are the original style images from the dataset. I used my multi-GPU Joycaption APP (8x A6000 for ultra-fast captioning): https://www.patreon.com/posts/110613301 I used my Gradio batch caption editor to edit some words and add the activation token ohwx 3d render: https://www.patreon.com/posts/108992085 The no-caption dataset uses only ohwx 3d render as the caption. I am using my newest 4x_GPU_Rank_1_SLOW_Better_Quality.json on 4x A6000 GPUs, training 500 epochs on 114 images: https://www.patreon.com/posts/110879657 The total step count is 500 * 114 / 4 (4x GPU, batch size 1) = 14250. Training is currently expected to take 37 hours if I don't terminate early. A checkpoint will be saved once every 25 epochs. Full Windows Kohya LoRA training tutorial: https://youtu.be/nySGu12Y05k The full cloud tutorial is still being edited. I hope to share the trained LoRA on Hugging Face and CivitAI along with the full dataset, including captions. I got permission to share the dataset, but it can't be used commercially. I also hope to share the full workflow on the CivitAI and Hugging Face LoRA pages.
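For anyone double-checking the schedule, the step-count arithmetic above is just epochs times images divided by the effective batch size (number of GPUs times per-GPU batch size). A tiny sketch:

```python
# Step-count arithmetic for multi-GPU training with per-GPU batch size 1
# (the author's 500 * 114 / 4 calculation).
epochs, images, gpus, batch_size = 500, 114, 4, 1
steps_per_epoch = images / (gpus * batch_size)
total_steps = int(epochs * steps_per_epoch)
print(total_steps)  # 14250
```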
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png", "fullname": "Furkan Gรถzรผkara", "name": "MonsterMMORPG", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 368, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/uzDY7XcoU-5y-ObSoCLoN.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/vW1OhzwcMn6gglsKc5XDp.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/idjp8LDSEHFhZ6PZCM7qd.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/d2kopHdFjRxmBYDCMr_17.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/3flDh6GgZ0DvCPA49f_sI.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/RW3IN9dJwqURTnwIII7T5.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/p2VR9MYj0zUj21J8Ut4Ez.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/bv4ALmepdH4Rsf87xTkmI.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/avgizrEGzfrO8tjkNQxcj.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/9Jjd-_y8Q6WwU50Wpds0p.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/L1ghkrFru08rn3wJcU9HY.png" } ]
[]
[ { "reaction": "๐Ÿš€", "users": [ "MonsterMMORPG", "ajibawa-2023", "xziayro" ], "count": 3 }, { "reaction": "โž•", "users": [ "MonsterMMORPG", "Triangalogin", "pduf" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "MonsterMMORPG" ], "count": 2 }, { "reaction": "๐Ÿค—", "users": [ "MonsterMMORPG", "whiplashG" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "MonsterMMORPG", "erblicken" ], "count": 2 }, { "reaction": "๐Ÿ”ฅ", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "โค๏ธ", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿง ", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿ˜Ž", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿค", "users": [ "MonsterMMORPG" ], "count": 1 }, { "reaction": "๐Ÿคฏ", "users": [ "MonsterMMORPG" ], "count": 1 } ]
2024-09-02T23:09:39.000Z
2024-09-03T00:19:10.257Z
[]
/posts/MonsterMMORPG/281598302766823
2,393
0
810635856263958
[ { "type": "text", "value": "Spent a few minutes to build an alternative to Character AI on top of llama3.1 405B through SambaNova's super fast inference API ", "raw": "Spent a few minutes to build an alternative to Character AI on top of llama3.1 405B through SambaNova's super fast inference API ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Space: ", "raw": "Space: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/kz919/Persona-AI", "resource": { "type": "space", "id": "kz919/Persona-AI", "discussionNum": null }, "url": "https://huggingface.co/spaces/kz919/Persona-AI", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "API referral link: ", "raw": "API referral link: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://sambanova.ai/fast-api?api_ref=907266", "resource": null, "url": null, "href": "https://sambanova.ai/fast-api?api_ref=907266", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Spent a few minutes to build an alternative to Character AI on top of llama3.1 405B through SambaNova's super fast inference API Space: https://huggingface.co/spaces/kz919/Persona-AI API referral link: https://sambanova.ai/fast-api?api_ref=907266
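Under the hood, an app like this is just chat completions with a persona system prompt. Here is a minimal sketch in the OpenAI-compatible client style; the base URL, model id, and environment-variable name are assumptions on my part, so take the exact values from SambaNova's API docs.

```python
# Sketch: persona-style chat against an OpenAI-compatible Llama 3.1 405B endpoint.
# base_url, model id, and the API key env var are assumptions -- check the provider docs.
import os
from openai import OpenAI

client = OpenAI(
    base_url="https://api.sambanova.ai/v1",   # assumed endpoint
    api_key=os.environ["SAMBANOVA_API_KEY"],  # assumed env var name
)

resp = client.chat.completions.create(
    model="Meta-Llama-3.1-405B-Instruct",     # assumed model id
    messages=[
        {"role": "system", "content": "You are Sherlock Holmes. Always stay in character."},
        {"role": "user", "content": "What can you deduce about me from this message?"},
    ],
)
print(resp.choices[0].message.content)
```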
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62140dcdcf7928035e8135ad/FTiirwS_L6IaLHmHwIo2g.png", "fullname": "Kaizhao Liang", "name": "kz919", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 34, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62140dcdcf7928035e8135ad/-sjYE0eR_9QmmXmV7Nzhy.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62140dcdcf7928035e8135ad/KjuAy-QnfL_R8TbjEByo0.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "kz919", "zolicsaki", "deki" ], "count": 3 }, { "reaction": "๐Ÿ˜Ž", "users": [ "kz919", "John6666", "zolicsaki" ], "count": 3 }, { "reaction": "๐Ÿš€", "users": [ "kz919", "zolicsaki" ], "count": 2 }, { "reaction": "๐Ÿค—", "users": [ "kz919", "zolicsaki" ], "count": 2 }, { "reaction": "๐Ÿคฏ", "users": [ "kz919", "zolicsaki" ], "count": 2 }, { "reaction": "๐Ÿง ", "users": [ "kz919", "zolicsaki" ], "count": 2 } ]
2024-09-02T21:33:14.000Z
2024-09-03T15:00:46.298Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66c75fe82c2207bb1732c672/X_a8y4ZrSAQEylKpERMFL.jpeg", "fullname": "Scott Cawthon", "name": "Opa-Opa", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false } ]
/posts/kz919/810635856263958
1,584
3
887755095475831
[ { "type": "text", "value": "ML people on a long flight", "raw": "ML people on a long flight", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "(See picture)", "raw": "(See picture)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
ML people on a long flight (See picture)
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1657144463525-629a173153a72d997d3f57d0.jpeg", "fullname": "Santiago Viquez", "name": "santiviquez", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 84, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/629a173153a72d997d3f57d0/QgnsrMxm4_79msO6PHhtG.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-09-02T21:24:44.000Z
2024-11-06T22:29:34.200Z
[ { "avatarUrl": "/avatars/744eddaa7dfc34a57df9ce32a78059a0.svg", "fullname": "Tyrone Pierce", "name": "piercyy", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/santiviquez/887755095475831
426
1
332713316648258
[ { "type": "text", "value": "๐Ÿ™‹๐Ÿปโ€โ™‚๏ธhey there folks ,", "raw": "๐Ÿ™‹๐Ÿปโ€โ™‚๏ธhey there folks ,", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โœ’๏ธInkubaLM has been trained from scratch using 1.9 billion tokens of data for five African languages, along with English and French data, totaling 2.4 billion tokens of data. It is capable of understanding and generating content in five African languages: Swahili, Yoruba, Hausa, isiZulu, and isiXhosa, as well as English and French.", "raw": "โœ’๏ธInkubaLM has been trained from scratch using 1.9 billion tokens of data for five African languages, along with English and French data, totaling 2.4 billion tokens of data. It is capable of understanding and generating content in five African languages: Swahili, Yoruba, Hausa, isiZulu, and isiXhosa, as well as English and French.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "model ", "raw": "model ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/lelapa/InkubaLM-0.4B", "resource": { "type": "model", "id": "lelapa/InkubaLM-0.4B", "discussionNum": null }, "url": "https://huggingface.co/lelapa/InkubaLM-0.4B", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "demo ", "raw": "demo ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Tonic/Inkuba-0.4B", "resource": { "type": "space", "id": "Tonic/Inkuba-0.4B", "discussionNum": null }, "url": "https://huggingface.co/spaces/Tonic/Inkuba-0.4B", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿ™‹๐Ÿปโ€โ™‚๏ธhey there folks , โœ’๏ธInkubaLM has been trained from scratch using 1.9 billion tokens of data for five African languages, along with English and French data, totaling 2.4 billion tokens of data. It is capable of understanding and generating content in five African languages: Swahili, Yoruba, Hausa, isiZulu, and isiXhosa, as well as English and French. model https://huggingface.co/lelapa/InkubaLM-0.4B demo https://huggingface.co/spaces/Tonic/Inkuba-0.4B
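A minimal transformers quick-start for trying the model locally is sketched below. The prompt and generation settings are arbitrary, and whether the checkpoint needs trust_remote_code is something to confirm on the model card.

```python
# Sketch: generate a short continuation with InkubaLM-0.4B.
# If loading fails, the checkpoint may require trust_remote_code=True (check the model card).
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "lelapa/InkubaLM-0.4B"
tok = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

prompt = "Habari ya leo ni"  # Swahili prompt
inputs = tok(prompt, return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=40, do_sample=True, top_p=0.9)
print(tok.decode(out[0], skip_special_tokens=True))
```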
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 310, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿš€", "users": [ "monsoon-nlp", "AtAndDev", "KingNish", "louisbrulenaudet", "d0rj", "Moio" ], "count": 6 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "AtAndDev", "afrideva", "osanseviero" ], "count": 4 }, { "reaction": "๐Ÿค—", "users": [ "ijohn07" ], "count": 1 } ]
2024-09-02T20:29:07.000Z
2024-09-02T20:29:07.386Z
[]
/posts/Tonic/332713316648258
2,524
0
675665165365717
[ { "type": "text", "value": "Do you know how PCA and SVD are related?", "raw": "Do you know how PCA and SVD are related?", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I explained it for everyone in this post!", "raw": "I explained it for everyone in this post!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Go and check it out: ", "raw": "Go and check it out: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://x.com/alexbodner_/status/1798357519678718062?s=46", "resource": null, "url": null, "href": "https://x.com/alexbodner_/status/1798357519678718062?s=46", "user": null, "lang": null, "code": null, "label": null } ]
Do you know how PCA and SVD are related? I explained it for everyone in this post! Go and check it out: https://x.com/alexbodner_/status/1798357519678718062?s=46
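The short version of the relationship: for a mean-centered data matrix X with SVD X = U S V^T, the rows of V^T are the principal axes, the covariance eigenvalues are S**2 / (n - 1), and the PCA scores are U @ diag(S). A small NumPy check of this, using synthetic data:

```python
# Numerical check of the PCA <-> SVD link for mean-centered data X (n x d):
#   X = U S Vt  ->  principal axes = rows of Vt, covariance eigenvalues = S**2 / (n - 1),
#   PCA scores (projections) = U @ diag(S).
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
Xc = X - X.mean(axis=0)
n = Xc.shape[0]

U, S, Vt = np.linalg.svd(Xc, full_matrices=False)

eigvals, eigvecs = np.linalg.eigh(np.cov(Xc, rowvar=False))
eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1]   # sort descending to match S

print(np.allclose(eigvals, S**2 / (n - 1)))          # True
print(np.allclose(np.abs(eigvecs), np.abs(Vt.T)))    # True (axes match up to sign)
```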
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/658880d499ed106ac888dd7a/wMv9-ZsJUw4QQnld_cci7.jpeg", "fullname": "Alexander Dylan Bodner", "name": "AlexBodner", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 28, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "andito" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "TDL123" ], "count": 1 } ]
2024-09-02T18:06:33.000Z
2024-09-02T18:06:33.192Z
[]
/posts/AlexBodner/675665165365717
1,269
0
389301188834529
[ { "type": "text", "value": "Hey everyone ๐Ÿค—!", "raw": "Hey everyone ๐Ÿค—!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Check out this awesome new model for object segmentation!", "raw": "Check out this awesome new model for object segmentation!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/finegrain/finegrain-object-cutter", "resource": { "type": "space", "id": "finegrain/finegrain-object-cutter", "discussionNum": null }, "url": "https://huggingface.co/spaces/finegrain/finegrain-object-cutter", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": ".", "raw": ".", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "We (finegrain) have trained this new model in partnership with Nfinite and some of their synthetic data, the resulting model is incredibly accurate ๐Ÿš€.", "raw": "We (finegrain) have trained this new model in partnership with Nfinite and some of their synthetic data, the resulting model is incredibly accurate ๐Ÿš€.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Itโ€™s all open source under the MIT license (", "raw": "Itโ€™s all open source under the MIT license (", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/finegrain/finegrain-box-segmenter", "resource": { "type": "model", "id": "finegrain/finegrain-box-segmenter", "discussionNum": null }, "url": "https://huggingface.co/finegrain/finegrain-box-segmenter", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "), complete with a test set tailored for e-commerce (", "raw": "), complete with a test set tailored for e-commerce (", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/finegrain/finegrain-product-masks-lite", "resource": { "type": "dataset", "id": "finegrain/finegrain-product-masks-lite", "discussionNum": null }, "url": "https://huggingface.co/datasets/finegrain/finegrain-product-masks-lite", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "). Have fun experimenting with it!", "raw": "). 
Have fun experimenting with it!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Hey everyone ๐Ÿค—! Check out this awesome new model for object segmentation! https://huggingface.co/spaces/finegrain/finegrain-object-cutter. We (finegrain) trained this new model in partnership with Nfinite, using some of their synthetic data; the resulting model is incredibly accurate ๐Ÿš€. It's all open source under the MIT license (https://huggingface.co/finegrain/finegrain-box-segmenter), complete with a test set tailored for e-commerce (https://huggingface.co/datasets/finegrain/finegrain-product-masks-lite). Have fun experimenting with it!
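To poke at the companion test set from code, the snippet below is a minimal sketch using the datasets library; it only prints whatever splits and columns the dataset actually defines, so confirm the specifics on the dataset card.

```python
# Sketch: inspect the e-commerce test set released alongside the model.
# No column names are assumed here -- we just print what the dataset exposes.
from datasets import load_dataset

ds = load_dataset("finegrain/finegrain-product-masks-lite")
print(ds)                               # shows the available splits and their columns

first_split = next(iter(ds.values()))
print(first_split[0].keys())            # e.g. product image / mask style fields
```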
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669043420538-6364f1784f773b7e4cede70c.jpeg", "fullname": "Laureฮทt Fainsin", "name": "1aurent", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 79, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6364f1784f773b7e4cede70c/JMY0ulmDOCo5-gaEBNspI.mp4" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "limiteinductive", "Sri-Vigneshwar-DJ", "John6666", "fdaudens", "TDL123", "1aurent", "deltheil", "piercus", "Mefistofele", "osanseviero", "vincentweisser", "victor", "dsmonk" ], "count": 13 }, { "reaction": "๐Ÿค—", "users": [ "liuxiao1037", "louisbrulenaudet" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "MohammedEltoum", "Norod78" ], "count": 2 } ]
2024-09-02T15:30:18.000Z
2024-09-02T15:30:18.329Z
[]
/posts/1aurent/389301188834529
4,340
0
102743494418226
[ { "type": "text", "value": "๐Ÿค– ๐—ง๐—ต๐—ฒ ๐—”๐—œ ๐—ฆ๐—ฐ๐—ถ๐—ฒ๐—ป๐˜๐—ถ๐˜€๐˜: ๐—”๐—ด๐—ฒ๐—ป๐˜๐—ถ๐—ฐ, ๐—ณ๐˜‚๐—น๐—น๐˜†-๐—ฎ๐˜‚๐˜๐—ผ๐—บ๐—ฎ๐˜๐—ฒ๐—ฑ ๐—ฟ๐—ฒ๐˜€๐—ฒ๐—ฎ๐—ฟ๐—ฐ๐—ต ๐—ฝ๐—ถ๐—ฝ๐—ฒ๐—น๐—ถ๐—ป๐—ฒ ๐—ณ๐—ผ๐—ฟ ๐˜‚๐—ป๐—ฑ๐—ฒ๐—ฟ $๐Ÿญ๐Ÿฑ ๐—ฝ๐—ฒ๐—ฟ ๐—ฝ๐—ฎ๐—ฝ๐—ฒ๐—ฟ", "raw": "๐Ÿค– ๐—ง๐—ต๐—ฒ ๐—”๐—œ ๐—ฆ๐—ฐ๐—ถ๐—ฒ๐—ป๐˜๐—ถ๐˜€๐˜: ๐—”๐—ด๐—ฒ๐—ป๐˜๐—ถ๐—ฐ, ๐—ณ๐˜‚๐—น๐—น๐˜†-๐—ฎ๐˜‚๐˜๐—ผ๐—บ๐—ฎ๐˜๐—ฒ๐—ฑ ๐—ฟ๐—ฒ๐˜€๐—ฒ๐—ฎ๐—ฟ๐—ฐ๐—ต ๐—ฝ๐—ถ๐—ฝ๐—ฒ๐—น๐—ถ๐—ป๐—ฒ ๐—ณ๐—ผ๐—ฟ ๐˜‚๐—ป๐—ฑ๐—ฒ๐—ฟ $๐Ÿญ๐Ÿฑ ๐—ฝ๐—ฒ๐—ฟ ๐—ฝ๐—ฎ๐—ฝ๐—ฒ๐—ฟ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Researchers have just created an AI system that ๐—ฐ๐—ฎ๐—ป ๐—ฐ๐—ผ๐—ป๐—ฑ๐˜‚๐—ฐ๐˜ ๐—ฒ๐—ป๐˜๐—ถ๐—ฟ๐—ฒ ๐—ฟ๐—ฒ๐˜€๐—ฒ๐—ฎ๐—ฟ๐—ฐ๐—ต ๐—ฝ๐—ฟ๐—ผ๐—ท๐—ฒ๐—ฐ๐˜๐˜€ ๐—ณ๐—ฟ๐—ผ๐—บ ๐˜€๐˜๐—ฎ๐—ฟ๐˜ ๐˜๐—ผ ๐—ณ๐—ถ๐—ป๐—ถ๐˜€๐—ต, ๐—ฝ๐—ผ๐˜๐—ฒ๐—ป๐˜๐—ถ๐—ฎ๐—น๐—น๐˜† ๐—ฟ๐—ฒ๐˜ƒ๐—ผ๐—น๐˜‚๐˜๐—ถ๐—ผ๐—ป๐—ถ๐˜‡๐—ถ๐—ป๐—ด ๐—ต๐—ผ๐˜„ ๐˜€๐—ฐ๐—ถ๐—ฒ๐—ป๐˜๐—ถ๐—ณ๐—ถ๐—ฐ ๐—ฑ๐—ถ๐˜€๐—ฐ๐—ผ๐˜ƒ๐—ฒ๐—ฟ๐—ถ๐—ฒ๐˜€ ๐—ฎ๐—ฟ๐—ฒ ๐—บ๐—ฎ๐—ฑ๐—ฒ.", "raw": "Researchers have just created an AI system that ๐—ฐ๐—ฎ๐—ป ๐—ฐ๐—ผ๐—ป๐—ฑ๐˜‚๐—ฐ๐˜ ๐—ฒ๐—ป๐˜๐—ถ๐—ฟ๐—ฒ ๐—ฟ๐—ฒ๐˜€๐—ฒ๐—ฎ๐—ฟ๐—ฐ๐—ต ๐—ฝ๐—ฟ๐—ผ๐—ท๐—ฒ๐—ฐ๐˜๐˜€ ๐—ณ๐—ฟ๐—ผ๐—บ ๐˜€๐˜๐—ฎ๐—ฟ๐˜ ๐˜๐—ผ ๐—ณ๐—ถ๐—ป๐—ถ๐˜€๐—ต, ๐—ฝ๐—ผ๐˜๐—ฒ๐—ป๐˜๐—ถ๐—ฎ๐—น๐—น๐˜† ๐—ฟ๐—ฒ๐˜ƒ๐—ผ๐—น๐˜‚๐˜๐—ถ๐—ผ๐—ป๐—ถ๐˜‡๐—ถ๐—ป๐—ด ๐—ต๐—ผ๐˜„ ๐˜€๐—ฐ๐—ถ๐—ฒ๐—ป๐˜๐—ถ๐—ณ๐—ถ๐—ฐ ๐—ฑ๐—ถ๐˜€๐—ฐ๐—ผ๐˜ƒ๐—ฒ๐—ฟ๐—ถ๐—ฒ๐˜€ ๐—ฎ๐—ฟ๐—ฒ ๐—บ๐—ฎ๐—ฑ๐—ฒ.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "It doesn't just assist with specific tasks - it automates the entire research process, from generating ideas to writing and reviewing papers.", "raw": "It doesn't just assist with specific tasks - it automates the entire research process, from generating ideas to writing and reviewing papers.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "1 - brainstorm novel research directions, 2- write and execute code for experiments & visualize results, get references, and even 3- write up findings in a full academic paper format!", "raw": "1 - brainstorm novel research directions, 2- write and execute code for experiments & visualize results, get references, and even 3- write up findings in a full academic paper format!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": 
null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "And it can do all this for under $15 per paper! ๐Ÿคฏ", "raw": "And it can do all this for under $15 per paper! ๐Ÿคฏ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐—ž๐—ฒ๐˜† ๐—ถ๐—ป๐˜€๐—ถ๐—ด๐—ต๐˜๐˜€:", "raw": "๐—ž๐—ฒ๐˜† ๐—ถ๐—ป๐˜€๐—ถ๐—ด๐—ต๐˜๐˜€:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿง  Generates novel research ideas across multiple topics (e.g. diffusion modeling, transformers, learning dynamics aka โ€œgrokkingโ€)", "raw": "๐Ÿง  Generates novel research ideas across multiple topics (e.g. diffusion modeling, transformers, learning dynamics aka โ€œgrokkingโ€)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ‘จโ€๐Ÿ’ป Uses open-source coding assistant Aider to implement ideas and run experiments. This is especially important since this agentic assistant can iterate if it fails somewhere.", "raw": "๐Ÿ‘จโ€๐Ÿ’ป Uses open-source coding assistant Aider to implement ideas and run experiments. 
This is especially important since this agentic assistant can iterate if it fails somewhere.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ“Š Visualizes results and plans follow-up experiments (up to 5 rounds)", "raw": "๐Ÿ“Š Visualizes results and plans follow-up experiments (up to 5 rounds)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โœ๏ธ Writes full academic papers, including finding references using Semantic Search API", "raw": "โœ๏ธ Writes full academic papers, including finding references using Semantic Search API", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ•ต๏ธ Runs a simulated peer review process to evaluate paper quality", "raw": "๐Ÿ•ต๏ธ Runs a simulated peer review process to evaluate paper quality", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ’ฐ Total cost per paper is under $15. This system can generate \"hundreds of interesting, medium-quality papers\" in just a week !", "raw": "๐Ÿ’ฐ Total cost per paper is under $15. 
This system can generate \"hundreds of interesting, medium-quality papers\" in just a week !", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐—ฆ๐˜๐—ถ๐—น๐—น ๐—ป๐—ผ๐˜ ๐—ฟ๐—ฒ๐—ฎ๐—ฑ๐˜† ๐˜๐—ผ ๐—ณ๐—ถ๐—น๐—น ๐—œ๐—–๐—Ÿ๐—ฅ ๐˜„๐—ถ๐˜๐—ต ๐—ฝ๐—ฎ๐—ฝ๐—ฒ๐—ฟ๐˜€:", "raw": "๐—ฆ๐˜๐—ถ๐—น๐—น ๐—ป๐—ผ๐˜ ๐—ฟ๐—ฒ๐—ฎ๐—ฑ๐˜† ๐˜๐—ผ ๐—ณ๐—ถ๐—น๐—น ๐—œ๐—–๐—Ÿ๐—ฅ ๐˜„๐—ถ๐˜๐—ต ๐—ฝ๐—ฎ๐—ฝ๐—ฒ๐—ฟ๐˜€:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ” Ideas generated in one domain tend to be repetitive across different runs, and even different language model", "raw": "๐Ÿ” Ideas generated in one domain tend to be repetitive across different runs, and even different language model", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ‘€ Does not use vision capabilities to fix visual issues in plots", "raw": "๐Ÿ‘€ Does not use vision capabilities to fix visual issues in plots", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ’ญ Models occasionally hallucinate entire results tables", "raw": "๐Ÿ’ญ Models occasionally hallucinate entire results tables", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "โ‡’ Only few of the generated papers would actually meet the threshold for acceptance at a top AI conference", "raw": "โ‡’ Only few of the generated papers would actually meet the threshold for acceptance at a top AI conference", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ‘‰ย Read their paper: ", "raw": "๐Ÿ‘‰ย Read their paper: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2408.06292", "resource": { "type": "paper", "id": "2408.06292", "discussionNum": null }, "url": "https://huggingface.co/papers/2408.06292", "href": null, 
"user": null, "lang": null, "code": null, "label": "The AI Scientist: Towards Fully Automated Open-Ended Scientific\n Discovery (2408.06292)" } ]
๐Ÿค– ๐—ง๐—ต๐—ฒ ๐—”๐—œ ๐—ฆ๐—ฐ๐—ถ๐—ฒ๐—ป๐˜๐—ถ๐˜€๐˜: ๐—”๐—ด๐—ฒ๐—ป๐˜๐—ถ๐—ฐ, ๐—ณ๐˜‚๐—น๐—น๐˜†-๐—ฎ๐˜‚๐˜๐—ผ๐—บ๐—ฎ๐˜๐—ฒ๐—ฑ ๐—ฟ๐—ฒ๐˜€๐—ฒ๐—ฎ๐—ฟ๐—ฐ๐—ต ๐—ฝ๐—ถ๐—ฝ๐—ฒ๐—น๐—ถ๐—ป๐—ฒ ๐—ณ๐—ผ๐—ฟ ๐˜‚๐—ป๐—ฑ๐—ฒ๐—ฟ $๐Ÿญ๐Ÿฑ ๐—ฝ๐—ฒ๐—ฟ ๐—ฝ๐—ฎ๐—ฝ๐—ฒ๐—ฟ Researchers have just created an AI system that ๐—ฐ๐—ฎ๐—ป ๐—ฐ๐—ผ๐—ป๐—ฑ๐˜‚๐—ฐ๐˜ ๐—ฒ๐—ป๐˜๐—ถ๐—ฟ๐—ฒ ๐—ฟ๐—ฒ๐˜€๐—ฒ๐—ฎ๐—ฟ๐—ฐ๐—ต ๐—ฝ๐—ฟ๐—ผ๐—ท๐—ฒ๐—ฐ๐˜๐˜€ ๐—ณ๐—ฟ๐—ผ๐—บ ๐˜€๐˜๐—ฎ๐—ฟ๐˜ ๐˜๐—ผ ๐—ณ๐—ถ๐—ป๐—ถ๐˜€๐—ต, ๐—ฝ๐—ผ๐˜๐—ฒ๐—ป๐˜๐—ถ๐—ฎ๐—น๐—น๐˜† ๐—ฟ๐—ฒ๐˜ƒ๐—ผ๐—น๐˜‚๐˜๐—ถ๐—ผ๐—ป๐—ถ๐˜‡๐—ถ๐—ป๐—ด ๐—ต๐—ผ๐˜„ ๐˜€๐—ฐ๐—ถ๐—ฒ๐—ป๐˜๐—ถ๐—ณ๐—ถ๐—ฐ ๐—ฑ๐—ถ๐˜€๐—ฐ๐—ผ๐˜ƒ๐—ฒ๐—ฟ๐—ถ๐—ฒ๐˜€ ๐—ฎ๐—ฟ๐—ฒ ๐—บ๐—ฎ๐—ฑ๐—ฒ. It doesn't just assist with specific tasks - it automates the entire research process, from generating ideas to writing and reviewing papers. 1 - brainstorm novel research directions, 2- write and execute code for experiments & visualize results, get references, and even 3- write up findings in a full academic paper format! And it can do all this for under $15 per paper! ๐Ÿคฏ ๐—ž๐—ฒ๐˜† ๐—ถ๐—ป๐˜€๐—ถ๐—ด๐—ต๐˜๐˜€: ๐Ÿง  Generates novel research ideas across multiple topics (e.g. diffusion modeling, transformers, learning dynamics aka โ€œgrokkingโ€) ๐Ÿ‘จโ€๐Ÿ’ป Uses open-source coding assistant Aider to implement ideas and run experiments. This is especially important since this agentic assistant can iterate if it fails somewhere. ๐Ÿ“Š Visualizes results and plans follow-up experiments (up to 5 rounds) โœ๏ธ Writes full academic papers, including finding references using Semantic Search API ๐Ÿ•ต๏ธ Runs a simulated peer review process to evaluate paper quality ๐Ÿ’ฐ Total cost per paper is under $15. This system can generate "hundreds of interesting, medium-quality papers" in just a week ! ๐—ฆ๐˜๐—ถ๐—น๐—น ๐—ป๐—ผ๐˜ ๐—ฟ๐—ฒ๐—ฎ๐—ฑ๐˜† ๐˜๐—ผ ๐—ณ๐—ถ๐—น๐—น ๐—œ๐—–๐—Ÿ๐—ฅ ๐˜„๐—ถ๐˜๐—ต ๐—ฝ๐—ฎ๐—ฝ๐—ฒ๐—ฟ๐˜€: ๐Ÿ” Ideas generated in one domain tend to be repetitive across different runs, and even different language model ๐Ÿ‘€ Does not use vision capabilities to fix visual issues in plots ๐Ÿ’ญ Models occasionally hallucinate entire results tables โ‡’ Only few of the generated papers would actually meet the threshold for acceptance at a top AI conference ๐Ÿ‘‰ย Read their paper: https://huggingface.co/papers/2408.06292
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 476, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/GZenzPhe-mYVWefu3CUn-.png" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "wsuff", "Sri-Vigneshwar-DJ", "John6666", "Bruhn", "sugatoray", "KingNish", "louisbrulenaudet", "toshihikochen", "alielfilali01" ], "count": 9 }, { "reaction": "๐Ÿ‘€", "users": [ "Svngoku" ], "count": 1 }, { "reaction": "๐Ÿš€", "users": [ "Csplk" ], "count": 1 } ]
2024-09-02T15:22:25.000Z
2024-09-02T15:22:25.282Z
[]
/posts/m-ric/102743494418226
2,210
0
512165858999722
[ { "type": "text", "value": "Plugins in NiansuhAI", "raw": "Plugins in NiansuhAI", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Plugin Names:", "raw": "Plugin Names:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "1. WebSearch: Searches the web using search engines.", "raw": "1. WebSearch: Searches the web using search engines.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "2. Calculator: Evaluates mathematical expressions, extending the base Tool class.", "raw": "2. Calculator: Evaluates mathematical expressions, extending the base Tool class.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "3. WebBrowser: Extracts and summarizes information from web pages.", "raw": "3. WebBrowser: Extracts and summarizes information from web pages.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "4. Wikipedia: Retrieves information from Wikipedia using its API.", "raw": "4. Wikipedia: Retrieves information from Wikipedia using its API.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "5. Arxiv: Searches and fetches article information from Arxiv.", "raw": "5. Arxiv: Searches and fetches article information from Arxiv.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "6. WolframAlphaTool: Provides answers on math, science, technology, culture, society, and everyday life.", "raw": "6. 
WolframAlphaTool: Provides answers on math, science, technology, culture, society, and everyday life.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "These plugins currently support the GPT-4O-2024-08-06 model, which also supports image analysis.", "raw": "These plugins currently support the GPT-4O-2024-08-06 model, which also supports image analysis.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Try it now: ", "raw": "Try it now: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/spaces/NiansuhAI/chat", "resource": null, "url": null, "href": "https://huggingface.co/spaces/NiansuhAI/chat", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Similar to: ", "raw": "Similar to: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://hf.co/chat", "resource": null, "url": null, "href": "https://hf.co/chat", "user": null, "lang": null, "code": null, "label": null } ]
Plugins in NiansuhAI Plugin Names: 1. WebSearch: Searches the web using search engines. 2. Calculator: Evaluates mathematical expressions, extending the base Tool class. 3. WebBrowser: Extracts and summarizes information from web pages. 4. Wikipedia: Retrieves information from Wikipedia using its API. 5. Arxiv: Searches and fetches article information from Arxiv. 6. WolframAlphaTool: Provides answers on math, science, technology, culture, society, and everyday life. These plugins currently support the GPT-4O-2024-08-06 model, which also supports image analysis. Try it now: https://huggingface.co/spaces/NiansuhAI/chat Similar to: https://hf.co/chat
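The post says each plugin extends a base Tool class but does not name the underlying framework, so the snippet below is only a generic sketch of that pattern; the `Tool`/`Calculator` class names and the `run` method are illustrative assumptions, not NiansuhAI's actual code.
```python
# Generic sketch of the "plugin extends a base Tool class" pattern described above.
# The framework behind NiansuhAI is not specified in the post, so every name here
# (Tool, Calculator, run) is an illustrative assumption rather than its real API.
import ast
import operator
from abc import ABC, abstractmethod


class Tool(ABC):
    """Minimal base class: every plugin exposes a name, a description, and run()."""
    name: str = "tool"
    description: str = ""

    @abstractmethod
    def run(self, query: str) -> str:
        ...


class Calculator(Tool):
    """Evaluates arithmetic expressions without resorting to eval()."""
    name = "Calculator"
    description = "Evaluates mathematical expressions."

    _ops = {
        ast.Add: operator.add, ast.Sub: operator.sub, ast.Mult: operator.mul,
        ast.Div: operator.truediv, ast.Pow: operator.pow, ast.USub: operator.neg,
    }

    def run(self, query: str) -> str:
        def _eval(node):
            if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
                return node.value
            if isinstance(node, ast.BinOp):
                return self._ops[type(node.op)](_eval(node.left), _eval(node.right))
            if isinstance(node, ast.UnaryOp):
                return self._ops[type(node.op)](_eval(node.operand))
            raise ValueError(f"Unsupported expression: {query!r}")

        return str(_eval(ast.parse(query, mode="eval").body))


if __name__ == "__main__":
    print(Calculator().run("2 * (3 + 4) ** 2"))  # -> 98
```
The AST-walking approach is just one safe way to implement such a tool; an agent framework would typically also expose the name and description fields to the model so it can decide when to call the plugin.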
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64cba00d710645aa7b04f281/a_-LPwd4wqRyi8sJ1QxjI.jpeg", "fullname": "Husnain", "name": "Niansuh", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 64, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "John6666", "TheDrunkenSnail", "leeloolee", "Sri-Vigneshwar-DJ", "Joseph717171", "Niansuh" ], "count": 6 }, { "reaction": "๐Ÿš€", "users": [ "Niansuh", "John6666", "Joseph717171" ], "count": 3 } ]
2024-09-02T12:57:09.000Z
2024-09-02T13:03:34.993Z
[]
/posts/Niansuh/512165858999722
2,373
0
680631748831020
[ { "type": "text", "value": "๐Ÿคฉ Amazing day. AWPortrait-FL finally here!", "raw": "๐Ÿคฉ Amazing day. AWPortrait-FL finally here!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿฆ– AWPortrait-FL is finetuned on FLUX.1-dev using the training set of AWPortrait-XL and nearly 2,000 fashion photography photos with extremely high aesthetic quality. ", "raw": "๐Ÿฆ– AWPortrait-FL is finetuned on FLUX.1-dev using the training set of AWPortrait-XL and nearly 2,000 fashion photography photos with extremely high aesthetic quality. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿค—Model: ", "raw": "๐Ÿค—Model: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/Shakker-Labs/AWPortrait-FL", "resource": { "type": "model", "id": "Shakker-Labs/AWPortrait-FL", "discussionNum": null }, "url": "https://huggingface.co/Shakker-Labs/AWPortrait-FL", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ™‡Demo: ", "raw": "๐Ÿ™‡Demo: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/vilarin/flux-labs", "resource": { "type": "space", "id": "vilarin/flux-labs", "discussionNum": null }, "url": "https://huggingface.co/spaces/vilarin/flux-labs", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿคฉ Amazing day. AWPortrait-FL finally here! ๐Ÿฆ– AWPortrait-FL is finetuned on FLUX.1-dev using the training set of AWPortrait-XL and nearly 2,000 fashion photography photos with extremely high aesthetic quality. ๐Ÿค—Model: https://huggingface.co/Shakker-Labs/AWPortrait-FL ๐Ÿ™‡Demo: https://huggingface.co/spaces/vilarin/flux-labs
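For readers who want to try the checkpoint outside the demo Space, a loading sketch along the lines below should work with a recent diffusers release, assuming the repo ships full diffusers-format FLUX weights; the prompt, step count and guidance value are arbitrary picks, so check the model card for the authors' recommended settings.
```python
# Hedged usage sketch, not the official snippet: assumes Shakker-Labs/AWPortrait-FL
# loads as a full diffusers FLUX checkpoint and that a large-VRAM GPU is available.
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "Shakker-Labs/AWPortrait-FL", torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()  # helps on GPUs that cannot hold the whole model

image = pipe(
    prompt="close-up portrait of a woman, fashion photography, soft window light",
    num_inference_steps=28,      # illustrative values -- see the model card
    guidance_scale=3.5,
    width=768,
    height=1024,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("awportrait_fl_sample.png")
```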
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/642827944fe87caede802784/a7s3Ub9Cy6-PuuaX8wwXm.png", "fullname": "VILARIN", "name": "vilarin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 68, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/642827944fe87caede802784/8ZkmV-C5Uc-4U8N41_HmC.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/642827944fe87caede802784/GUyhoP12XQZ-DqK5zXW5Y.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/642827944fe87caede802784/MCP4knqBEFyKldRKShT9H.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/642827944fe87caede802784/LUGWK_jOjGP8ngXg97Ee2.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/642827944fe87caede802784/pxq-PgFj2eYUPYic2e3N-.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/642827944fe87caede802784/9lgsu9DkYO0LcqVv3pBPj.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/642827944fe87caede802784/6Ldc5VkkAexuFcK-67Od3.webp" } ]
[]
[ { "reaction": "โค๏ธ", "users": [ "orrinin", "samapika", "dillfrescott", "John6666", "wanghaofan", "ijohn07", "Bruhn", "ajibawa-2023", "gshreyash", "tanfar", "lunarflu", "louisbrulenaudet", "ShakkerAi-Labs", "linoyts", "Despina", "KingNish", "victor", "sasikiran", "Pranavan", "nbroad", "Sri-Vigneshwar-DJ", "privategeek24", "traltyaziking", "TDL123", "koochikoo25", "Taylor658", "AtAndDev", "huangy1", "tayyabmehar27", "Mefistofele", "ibrahim313", "awplanet" ], "count": 32 }, { "reaction": "๐Ÿคฏ", "users": [ "ibrahim313" ], "count": 1 }, { "reaction": "๐Ÿ”ฅ", "users": [ "ibrahim313" ], "count": 1 }, { "reaction": "๐Ÿš€", "users": [ "ibrahim313" ], "count": 1 } ]
2024-09-01T13:21:19.000Z
2024-09-05T16:11:33.300Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64282d3deb2891d3746a1f1e/V7xBCMfcShiMTjjJYaJBv.png", "fullname": "orrin", "name": "orrinin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66d47c04b2302a63f24f1253/qWm-a6vYAmJgrhQ8kvner.jpeg", "fullname": "Samapika Priyadarshini", "name": "samapika", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg", "fullname": "Feynman Innovations", "name": "ajibawa-2023", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 137, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6340651b388c3fa40f9a5bc0/av1C4_S7bHGxAzOu8lOmG.jpeg", "fullname": "Adam Molnar", "name": "lunarflu", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 334, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63a567cdce5763e06f7af435/6E6ijsMOl9ys__Aznx4Si.jpeg", "fullname": "DynamicWang", "name": "awplanet", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 32, "isFollowing": false }, { "avatarUrl": "/avatars/f3839f73cd47dff15be3bdb0dbd3d50d.svg", "fullname": "001Anas", "name": "Mohammad121", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/vilarin/680631748831020
5,991
6
922470981780593
[ { "type": "text", "value": "Understanding the json format response with HF's Serverless Inference API ๐Ÿค—", "raw": "Understanding the json format response with HF's Serverless Inference API ๐Ÿค—", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "As it stands, there seems to be an inconsistency with the OpenAI documentation on the question of implementing the JSON response format using the InferenceClient completion API.", "raw": "As it stands, there seems to be an inconsistency with the OpenAI documentation on the question of implementing the JSON response format using the InferenceClient completion API.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "After investigating the InferenceClient source code, I share the official solution using a JSON Schema. This consolidates the structure of the response and simplifies parsing as part of an automated process for extracting metadata, information:", "raw": "After investigating the InferenceClient source code, I share the official solution using a JSON Schema. This consolidates the structure of the response and simplifies parsing as part of an automated process for extracting metadata, information:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "code_fence", "value": null, "raw": "```python\nfrom huggingface_hub import InferenceClient\n\nclient = InferenceClient(\"meta-llama/Meta-Llama-3-70B-Instruct\")\n\nmessages = [\n {\n \"role\": \"user\",\n \"content\": \"I saw a puppy a cat and a raccoon during my bike ride in the park. What did I saw and when?\",\n },\n]\n\nresponse_format = {\n \"type\": \"json\",\n \"value\": {\n \"properties\": {\n \"location\": {\"type\": \"string\"},\n \"activity\": {\"type\": \"string\"},\n \"animals_seen\": {\"type\": \"integer\", \"minimum\": 1, \"maximum\": 5},\n \"animals\": {\"type\": \"array\", \"items\": {\"type\": \"string\"}},\n },\n \"required\": [\"location\", \"activity\", \"animals_seen\", \"animals\"],\n },\n}\n\nresponse = client.chat_completion(\n messages=messages,\n response_format=response_format,\n max_tokens=500,\n)\n\nprint(response.choices[0].message.content)\n```", "resource": null, "url": null, "href": null, "user": null, "lang": "python", "code": "from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\"meta-llama/Meta-Llama-3-70B-Instruct\")\n\nmessages = [\n {\n \"role\": \"user\",\n \"content\": \"I saw a puppy a cat and a raccoon during my bike ride in the park. 
What did I saw and when?\",\n },\n]\n\nresponse_format = {\n \"type\": \"json\",\n \"value\": {\n \"properties\": {\n \"location\": {\"type\": \"string\"},\n \"activity\": {\"type\": \"string\"},\n \"animals_seen\": {\"type\": \"integer\", \"minimum\": 1, \"maximum\": 5},\n \"animals\": {\"type\": \"array\", \"items\": {\"type\": \"string\"}},\n },\n \"required\": [\"location\", \"activity\", \"animals_seen\", \"animals\"],\n },\n}\n\nresponse = client.chat_completion(\n messages=messages,\n response_format=response_format,\n max_tokens=500,\n)\n\nprint(response.choices[0].message.content)", "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "As a reminder, json mode is activated with the OpenAI client as follows:", "raw": "As a reminder, json mode is activated with the OpenAI client as follows:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "code_fence", "value": null, "raw": "```python\nresponse = client.chat.completions.create(\n model=\"gpt-3.5-turbo-0125\",\n messages=[...],\n response_format={\"type\": \"json_object\"}\n)\n```", "resource": null, "url": null, "href": null, "user": null, "lang": "python", "code": "response = client.chat.completions.create(\n model=\"gpt-3.5-turbo-0125\",\n messages=[...],\n response_format={\"type\": \"json_object\"}\n)", "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "One question remains unanswered, however, and will perhaps be answered by the community: it seems that an incompatibility persists for list of dictionaries generation, and currently, the production of simple dictionaries seems to be the only functional option.", "raw": "One question remains unanswered, however, and will perhaps be answered by the community: it seems that an incompatibility persists for list of dictionaries generation, and currently, the production of simple dictionaries seems to be the only functional option.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Understanding the json format response with HF's Serverless Inference API ๐Ÿค— As it stands, there seems to be an inconsistency with the OpenAI documentation on the question of implementing the JSON response format using the InferenceClient completion API. After investigating the InferenceClient source code, I share the official solution using a JSON Schema. This consolidates the structure of the response and simplifies parsing as part of an automated process for extracting metadata, information: ```python from huggingface_hub import InferenceClient client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct") messages = [ { "role": "user", "content": "I saw a puppy a cat and a raccoon during my bike ride in the park. What did I saw and when?", }, ] response_format = { "type": "json", "value": { "properties": { "location": {"type": "string"}, "activity": {"type": "string"}, "animals_seen": {"type": "integer", "minimum": 1, "maximum": 5}, "animals": {"type": "array", "items": {"type": "string"}}, }, "required": ["location", "activity", "animals_seen", "animals"], }, } response = client.chat_completion( messages=messages, response_format=response_format, max_tokens=500, ) print(response.choices[0].message.content) ``` As a reminder, json mode is activated with the OpenAI client as follows: ```python response = client.chat.completions.create( model="gpt-3.5-turbo-0125", messages=[...], response_format={"type": "json_object"} ) ``` One question remains unanswered, however, and will perhaps be answered by the community: it seems that an incompatibility persists for list of dictionaries generation, and currently, the production of simple dictionaries seems to be the only functional option.
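One small follow-up that may save some head-scratching: even with the schema constraint, `message.content` comes back as a JSON string, so it still needs to be parsed (and ideally sanity-checked) before being used downstream. A minimal sketch, reusing the `response` object and schema from the snippet above:
```python
import json

# The constrained output is still returned as a string; parse it before use.
result = json.loads(response.choices[0].message.content)

# Light sanity check against the schema's required keys (illustrative only --
# a real pipeline might validate with jsonschema or pydantic instead).
required = {"location", "activity", "animals_seen", "animals"}
missing = required - result.keys()
if missing:
    raise ValueError(f"Model response is missing keys: {missing}")

print(result["location"], result["animals_seen"], result["animals"])
```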
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6459fa0f5b3111fbe83286e1/UhCa7JNbtTjC6dgOjZtH0.jpeg", "fullname": "Louis Brulรฉ Naudet", "name": "louisbrulenaudet", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 176, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "victor", "wsuff", "rreed-pha", "osanseviero" ], "count": 5 } ]
2024-09-01T12:11:31.000Z
2024-09-02T12:21:08.618Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2578, "isFollowing": false } ]
/posts/louisbrulenaudet/922470981780593
1,856
1
147425380710766
[ { "type": "text", "value": "I am training a controlnet model for Flux. And some of my experiences:", "raw": "I am training a controlnet model for Flux. And some of my experiences:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Checkpoint-10000:", "raw": "Checkpoint-10000:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://x.com/kadirnar_ai/status/1829831750471606668", "resource": null, "url": null, "href": "https://x.com/kadirnar_ai/status/1829831750471606668", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Checkpoint-12000:", "raw": "Checkpoint-12000:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://x.com/kadirnar_ai/status/1829889524962640001", "resource": null, "url": null, "href": "https://x.com/kadirnar_ai/status/1829889524962640001", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Checkpoint-14000:", "raw": "Checkpoint-14000:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://x.com/kadirnar_ai/status/1829989622878744711", "resource": null, "url": null, "href": "https://x.com/kadirnar_ai/status/1829989622878744711", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, 
"code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Checkpoint (16000-18000):", "raw": "Checkpoint (16000-18000):", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://x.com/kadirnar_ai/status/1830179551407665654", "resource": null, "url": null, "href": "https://x.com/kadirnar_ai/status/1830179551407665654", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Dataset: ", "raw": "Dataset: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/kadirnar/fluxdev_controlnet_16k", "resource": { "type": "dataset", "id": "kadirnar/fluxdev_controlnet_16k", "discussionNum": null }, "url": "https://huggingface.co/datasets/kadirnar/fluxdev_controlnet_16k", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "GPU: 1xA100(80GB)", "raw": "GPU: 1xA100(80GB)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "GPU Hours: 65 ", "raw": "GPU Hours: 65 ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
I am training a controlnet model for Flux. And some of my experiences: Checkpoint-10000: https://x.com/kadirnar_ai/status/1829831750471606668 Checkpoint-12000: https://x.com/kadirnar_ai/status/1829889524962640001 Checkpoint-14000: https://x.com/kadirnar_ai/status/1829989622878744711 Checkpoint (16000-18000): https://x.com/kadirnar_ai/status/1830179551407665654 Dataset: https://huggingface.co/datasets/kadirnar/fluxdev_controlnet_16k GPU: 1xA100(80GB) GPU Hours: 65
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1678181702571-619f7ba90df8731e0d8b6c54.jpeg", "fullname": "Kadir Nar", "name": "kadirnar", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 197, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/619f7ba90df8731e0d8b6c54/B5s69n0q8_HNlI6TRtlUI.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/619f7ba90df8731e0d8b6c54/vKCMZUw57mTfkMXQyJrjA.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/619f7ba90df8731e0d8b6c54/4DIOMrpRmxzXG7h8ZOKua.png" } ]
[]
[ { "reaction": "๐Ÿš€", "users": [ "tolgacangoz", "John6666", "gokaygokay", "Shinku", "AtAndDev", "xziayro", "Saugatkafley", "l3x13" ], "count": 8 }, { "reaction": "โค๏ธ", "users": [ "tolgacangoz", "louisbrulenaudet", "gokaygokay", "Pranavan", "AtAndDev", "xziayro", "osanseviero" ], "count": 7 }, { "reaction": "๐Ÿ”ฅ", "users": [ "tolgacangoz", "gokaygokay", "AtAndDev", "Sri-Vigneshwar-DJ" ], "count": 4 }, { "reaction": "๐Ÿ‘", "users": [ "jefinpaul", "Kazabra", "AtAndDev", "bomze" ], "count": 4 } ]
2024-09-01T09:55:15.000Z
2024-10-26T07:59:33.466Z
[ { "avatarUrl": "/avatars/bbdc1d48c816cb373013fb2d38501866.svg", "fullname": "ๆฒˆๆŸฏ", "name": "SKKK123", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/kadirnar/147425380710766
3,801
1
713480041248724
[ { "type": "text", "value": "Last Week in Medical AI: Top Research ", "raw": "Last Week in Medical AI: Top Research ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Papers/Models", "raw": "Papers/Models", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ… (August 25 - August 31, 2024)", "raw": "๐Ÿ… (August 25 - August 31, 2024)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- MultiMed: Multimodal Medical Benchmark", "raw": "- MultiMed: Multimodal Medical Benchmark", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- A Foundation model for generating chest X-ray images", "raw": "- A Foundation model for generating chest X-ray images", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- MEDSAGE: Medical Dialogue Summarization", "raw": "- MEDSAGE: Medical Dialogue Summarization", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Knowledge Graphs for Radiology Report Generation", "raw": "- Knowledge Graphs for Radiology Report Generation", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Exploring Multi-modal LLMs for Chest X-ray", "raw": "- Exploring Multi-modal LLMs for Chest X-ray", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Improving Clinical Note Generation", "raw": "- Improving Clinical Note Generation", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "...", "raw": "...", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, 
{ "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Check the full thread : ", "raw": "Check the full thread : ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://x.com/OpenlifesciAI/status/1829984701324448051", "resource": null, "url": null, "href": "https://x.com/OpenlifesciAI/status/1829984701324448051", "user": null, "lang": null, "code": null, "label": null } ]
Last Week in Medical AI: Top Research Papers/Models ๐Ÿ… (August 25 - August 31, 2024) - MultiMed: Multimodal Medical Benchmark - A Foundation model for generating chest X-ray images - MEDSAGE: Medical Dialogue Summarization - Knowledge Graphs for Radiology Report Generation - Exploring Multi-modal LLMs for Chest X-ray - Improving Clinical Note Generation ... Check the full thread : https://x.com/OpenlifesciAI/status/1829984701324448051
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f3fe13d79c1ba4c353d0c19/XswyGe3OtOdZ6g7rnrgfc.png", "fullname": "Aaditya Ura", "name": "aaditya", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 221, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/UU2oDlXjjqDPKToE74hxD.jpeg" } ]
[]
[ { "reaction": "โค๏ธ", "users": [ "aaditya", "jaebumskiyomi", "aiisthebest", "ai-everyday", "adityaSaligram", "dblasko", "victor", "JCDentonInTheFresh" ], "count": 8 }, { "reaction": "๐Ÿค—", "users": [ "aaditya", "John6666", "jaebumskiyomi", "aiisthebest", "JCDentonInTheFresh" ], "count": 5 }, { "reaction": "๐Ÿ”ฅ", "users": [ "aaditya", "charanhu", "JCDentonInTheFresh" ], "count": 3 }, { "reaction": "๐Ÿš€", "users": [ "aaditya", "JCDentonInTheFresh", "Taylor658" ], "count": 3 }, { "reaction": "๐Ÿ‘", "users": [ "aiisthebest" ], "count": 1 } ]
2024-08-31T21:42:43.000Z
2024-09-04T09:58:24.502Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2846, "isFollowing": false } ]
/posts/aaditya/713480041248724
3,002
1
416847424881120
[ { "type": "text", "value": "๐Ÿ’พ๐Ÿง How much VRAM will you need for training your AI model? ๐Ÿ’พ๐Ÿง ", "raw": "๐Ÿ’พ๐Ÿง How much VRAM will you need for training your AI model? ๐Ÿ’พ๐Ÿง ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Check out this app where you convert: ", "raw": "Check out this app where you convert: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Pytorch/tensorflow summary -> needed VRAM ", "raw": "Pytorch/tensorflow summary -> needed VRAM ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "or ", "raw": "or ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Parameter count -> needed VRAM", "raw": "Parameter count -> needed VRAM", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Use it in: ", "raw": "Use it in: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "http://howmuchvram.com", "resource": null, "url": null, "href": "http://howmuchvram.com", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "And everything is open source! Ask for new functionalities or contribute in:", "raw": "And everything is open source! 
Ask for new functionalities or contribute in:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/AlexBodner/How_Much_VRAM", "resource": null, "url": null, "href": "https://github.com/AlexBodner/How_Much_VRAM", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "If it's useful to you leave a star ๐ŸŒŸand share it to someone that will find the tool useful! ", "raw": "If it's useful to you leave a star ๐ŸŒŸand share it to someone that will find the tool useful! ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿ’พ๐Ÿง How much VRAM will you need for training your AI model? ๐Ÿ’พ๐Ÿง  Check out this app where you convert: PyTorch/TensorFlow summary -> needed VRAM or Parameter count -> needed VRAM Use it in: http://howmuchvram.com And everything is open source! Ask for new functionalities or contribute in: https://github.com/AlexBodner/How_Much_VRAM If it's useful to you, leave a star ๐ŸŒŸ and share it with someone who will find the tool useful!
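For intuition about what such a converter computes, a back-of-the-envelope estimate from a parameter count looks roughly like the sketch below; the byte counts assume plain fp32 training with Adam and ignore activations, the CUDA context and fragmentation, and the function names are illustrative, not the ones used in How_Much_VRAM.
```python
# Rough rule-of-thumb VRAM estimate from a parameter count. This is an illustrative
# approximation, not the actual formula used by howmuchvram.com: it ignores
# activations, the CUDA context and memory fragmentation.

def estimate_training_vram_gib(
    n_params: float,
    weight_bytes: int = 4,     # fp32 weights (use 2 for fp16/bf16)
    grad_bytes: int = 4,       # one gradient value per parameter
    optimizer_bytes: int = 8,  # Adam keeps two fp32 moments per parameter
) -> float:
    """Approximate GiB needed for full fine-tuning with Adam."""
    return n_params * (weight_bytes + grad_bytes + optimizer_bytes) / 1024**3


def estimate_inference_vram_gib(n_params: float, weight_bytes: int = 2) -> float:
    """Approximate GiB needed just to hold the weights at inference time."""
    return n_params * weight_bytes / 1024**3


if __name__ == "__main__":
    params = 7e9  # e.g. a 7B-parameter model
    print(f"Training (fp32 + Adam): ~{estimate_training_vram_gib(params):.0f} GiB")
    print(f"Inference (fp16 weights): ~{estimate_inference_vram_gib(params):.0f} GiB")
```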
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/658880d499ed106ac888dd7a/wMv9-ZsJUw4QQnld_cci7.jpeg", "fullname": "Alexander Dylan Bodner", "name": "AlexBodner", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 28, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "tuanlda78202", "louisbrulenaudet", "AlexBodner", "Bruhn", "den0620", "victor", "AtAndDev" ], "count": 8 }, { "reaction": "๐Ÿš€", "users": [ "rmayormartins", "AlexBodner", "whitebill", "erkhem-gantulga", "AtAndDev" ], "count": 5 }, { "reaction": "๐Ÿ‘", "users": [ "mwz", "AtAndDev", "ajibawa-2023", "jchataigne" ], "count": 4 }, { "reaction": "๐Ÿง ", "users": [ "AntonioTepsich", "AtAndDev" ], "count": 2 } ]
2024-08-31T19:07:47.000Z
2024-09-02T13:27:12.929Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66d47c04b2302a63f24f1253/qWm-a6vYAmJgrhQ8kvner.jpeg", "fullname": "Samapika Priyadarshini", "name": "samapika", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/781a110b5ac82d4fd4e28c9dd54e2667.svg", "fullname": "marcos", "name": "marcos9", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/658880d499ed106ac888dd7a/wMv9-ZsJUw4QQnld_cci7.jpeg", "fullname": "Alexander Dylan Bodner", "name": "AlexBodner", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 28, "isFollowing": false } ]
/posts/AlexBodner/416847424881120
3,782
3
660821907550330
[ { "type": "text", "value": "From Article 50 of the EU AI Act: ", "raw": "From Article 50 of the EU AI Act: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "\"2. Providers of AI systems, including general-purpose AI systems, generating synthetic audio, image, video or text content, shall ensure that the outputs of the AI system are marked in a machine-readable format and detectable as artificially generated or manipulated.\"", "raw": "\"2. Providers of AI systems, including general-purpose AI systems, generating synthetic audio, image, video or text content, shall ensure that the outputs of the AI system are marked in a machine-readable format and detectable as artificially generated or manipulated.\"", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "How might this be put into practice?", "raw": "How might this be put into practice?", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I'm interested to know how content might be deemed as being \"detectable\" as artificially generated. I wonder if this will require an image be detectable as AI generated if it was copied out of the site / application it was created on?", "raw": "I'm interested to know how content might be deemed as being \"detectable\" as artificially generated. I wonder if this will require an image be detectable as AI generated if it was copied out of the site / application it was created on?", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Some sort of a watermark? LSB Stegranography? I wonder if openAI are already sneaking something like this into DALL-E images.", "raw": "Some sort of a watermark? LSB Stegranography? 
I wonder if openAI are already sneaking something like this into DALL-E images.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Some sort of hash, which allowing content to be looked up, and verified as AI generated?", "raw": "Some sort of hash, which allowing content to be looked up, and verified as AI generated?", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Would a pop up saying \"this output was generated with AI\"? suffice? Any ideas? Time is on the system provider's side, at least for now, as from what I can see this doesn't come into effect until August 2026.", "raw": "Would a pop up saying \"this output was generated with AI\"? suffice? Any ideas? Time is on the system provider's side, at least for now, as from what I can see this doesn't come into effect until August 2026.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "src: ", "raw": "src: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://artificialintelligenceact.eu/article/50/", "resource": null, "url": null, "href": "https://artificialintelligenceact.eu/article/50/", "user": null, "lang": null, "code": null, "label": null } ]
From Article 50 of the EU AI Act: "2. Providers of AI systems, including general-purpose AI systems, generating synthetic audio, image, video or text content, shall ensure that the outputs of the AI system are marked in a machine-readable format and detectable as artificially generated or manipulated." How might this be put into practice? I'm interested to know how content might be deemed as being "detectable" as artificially generated. I wonder if this will require an image to be detectable as AI generated if it was copied out of the site / application it was created on? Some sort of a watermark? LSB Steganography? I wonder if OpenAI are already sneaking something like this into DALL-E images. Some sort of hash, allowing content to be looked up and verified as AI generated? Would a pop-up saying "this output was generated with AI" suffice? Any ideas? Time is on the system provider's side, at least for now, as from what I can see this doesn't come into effect until August 2026. src: https://artificialintelligenceact.eu/article/50/
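Since LSB steganography is one of the options floated above, here is a toy sketch of what such a machine-readable mark could look like; the payload string and function names are invented for illustration, and this is emphatically not a compliant or robust scheme, since the mark disappears after any resize, crop or lossy re-encode (which is why provenance metadata and model-side watermarks that survive re-encoding are the more commonly discussed routes).
```python
# Toy LSB (least-significant-bit) marking sketch -- illustration only.
# A real Article 50 mechanism would need to survive resizing, cropping and
# re-encoding, which this does not.
from PIL import Image

MARK = "AI-GENERATED"  # hypothetical payload


def embed_mark(in_path: str, out_path: str, payload: str = MARK) -> None:
    img = Image.open(in_path).convert("RGB")
    bits = "".join(f"{b:08b}" for b in payload.encode()) + "0" * 8  # null terminator
    channels = [c for px in img.getdata() for c in px]
    if len(bits) > len(channels):
        raise ValueError("Image too small for the payload")
    for i, bit in enumerate(bits):
        channels[i] = (channels[i] & ~1) | int(bit)  # overwrite the lowest bit
    marked = Image.new("RGB", img.size)
    marked.putdata([tuple(channels[i:i + 3]) for i in range(0, len(channels), 3)])
    marked.save(out_path, "PNG")  # lossless format, otherwise the bits are destroyed


def read_mark(path: str) -> str:
    channels = [c for px in Image.open(path).convert("RGB").getdata() for c in px]
    out = bytearray()
    for i in range(0, len(channels) - 7, 8):
        byte = int("".join(str(c & 1) for c in channels[i:i + 8]), 2)
        if byte == 0:
            break
        out.append(byte)
    return out.decode(errors="replace")
```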
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/651d4e73acd8e9168ac92b04/WMYCWKx9MM8Xxj8vXursD.png", "fullname": "Jonah Ramponi", "name": "jonah-ramponi", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-08-31T18:44:33.000Z
2024-09-01T08:41:50.378Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60b4acc69c978cce68723b34/eEnAT3CgDcnYKa7PIj5FB.jpeg", "fullname": "Jannes Stubbemann", "name": "stubbi", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/jonah-ramponi/660821907550330
656
1
872552437419473
[ { "type": "text", "value": "I found this paper to be thought-provoking: \"Smaller, Weaker, Yet Better: Training LLM Reasoners via Compute-Optimal Sampling\" by Bansal, Hosseini, Agarwal, Tran, and Kazemi.", "raw": "I found this paper to be thought-provoking: \"Smaller, Weaker, Yet Better: Training LLM Reasoners via Compute-Optimal Sampling\" by Bansal, Hosseini, Agarwal, Tran, and Kazemi.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2408.16737", "resource": null, "url": null, "href": "https://arxiv.org/abs/2408.16737", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "The direct implication is that smaller models could be used to create cost-effective synthetic datasets. And on that note, in the Gemma terms of use, Google explicitly claims no rights on outputs generated from those models, which means one is free to synthgen from the Gemma line. Meta's Llama 3 licence forbids synthetic generation of outputs if used to improve other models. Relevant Mistral, Qwen, and Yi models under the Apache 2.0 license are unrestricted for this purpose.", "raw": "The direct implication is that smaller models could be used to create cost-effective synthetic datasets. And on that note, in the Gemma terms of use, Google explicitly claims no rights on outputs generated from those models, which means one is free to synthgen from the Gemma line. Meta's Llama 3 licence forbids synthetic generation of outputs if used to improve other models. Relevant Mistral, Qwen, and Yi models under the Apache 2.0 license are unrestricted for this purpose.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
I found this paper to be thought-provoking: "Smaller, Weaker, Yet Better: Training LLM Reasoners via Compute-Optimal Sampling" by Bansal, Hosseini, Agarwal, Tran, and Kazemi. https://arxiv.org/abs/2408.16737 The direct implication is that smaller models could be used to create cost-effective synthetic datasets. And on that note, in the Gemma terms of use, Google explicitly claims no rights on outputs generated from those models, which means one is free to generate synthetic data from the Gemma line. Meta's Llama 3 license forbids using generated outputs to improve other models. The relevant Mistral, Qwen, and Yi models under the Apache 2.0 license are unrestricted for this purpose.
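Editor's note: a minimal sketch of the "weaker but cheaper" sampling idea the paper argues for: at a fixed compute budget, a small model can afford several candidate responses per prompt when building a synthetic dataset. The model id, prompt, and sampling settings below are placeholders chosen for illustration, not taken from the paper.

```python
from transformers import pipeline

# A small, permissively usable instruct model stands in for the "weaker" sampler.
generator = pipeline("text-generation", model="google/gemma-2-2b-it", device_map="auto")

prompts = ["Explain why the sum of two odd numbers is even."]  # stand-in for UltraFeedback-style prompts
samples_per_prompt = 4  # the trade-off: more samples per prompt from a cheaper model

synthetic_rows = []
for prompt in prompts:
    outputs = generator(
        prompt,
        num_return_sequences=samples_per_prompt,
        do_sample=True,
        temperature=0.8,
        max_new_tokens=256,
        return_full_text=False,
    )
    synthetic_rows.extend({"prompt": prompt, "response": o["generated_text"]} for o in outputs)

print(len(synthetic_rows), "synthetic rows generated")
```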
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65c992424936ab38ecf706b0/aq7vuHFPO1S93fwJk0Cuq.jpeg", "fullname": "Jim Lai", "name": "grimjim", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 163, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "anakin87", "AtAndDev", "tachyon-beep", "tommulder", "victor", "gghfez", "louisbrulenaudet", "djuna" ], "count": 9 }, { "reaction": "๐Ÿ‘", "users": [ "trollek", "AymaneElfirdo", "ajibawa-2023" ], "count": 3 }, { "reaction": "๐Ÿ”ฅ", "users": [ "aaditya", "tommulder" ], "count": 2 } ]
2024-08-31T13:48:47.000Z
2024-09-02T14:49:59.726Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/630f3e4002ce39336c411048/FXJON7b-aRUiH0_V2uRsi.jpeg", "fullname": "alkinun", "name": "AtAndDev", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 18, "isFollowing": false }, { "avatarUrl": "/avatars/52a153d04d325469e1be69bce610ebe5.svg", "fullname": "ecyht2", "name": "ecyht2", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/grimjim/872552437419473
3,225
2
461803347660596
[ { "type": "text", "value": "Just tried LitServe from the good folks at ", "raw": "Just tried LitServe from the good folks at ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@LightningAI", "resource": null, "url": null, "href": null, "user": "LightningAI", "lang": null, "code": null, "label": null }, { "type": "text", "value": "!", "raw": "!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Between llama.cpp and vLLM, there is a small gap where a few large models are not deployable!", "raw": "Between llama.cpp and vLLM, there is a small gap where a few large models are not deployable!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "That's where LitServe comes in!", "raw": "That's where LitServe comes in!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "LitServe is a high-throughput serving engine for AI models built on FastAPI.", "raw": "LitServe is a high-throughput serving engine for AI models built on FastAPI.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Yes, built on FastAPI. That's where the advantage and the issue lie.", "raw": "Yes, built on FastAPI. 
That's where the advantage and the issue lie.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "It's extremely flexible and supports multi-modality and a variety of models out of the box.", "raw": "It's extremely flexible and supports multi-modality and a variety of models out of the box.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "But in my testing, it lags far behind in speed compared to vLLM.", "raw": "But in my testing, it lags far behind in speed compared to vLLM.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Also, no OpenAI API-compatible endpoint is available as of now.", "raw": "Also, no OpenAI API-compatible endpoint is available as of now.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "But as we move to multi-modal models and agents, this serves as a good starting point. However, itโ€™s got to become faster...", "raw": "But as we move to multi-modal models and agents, this serves as a good starting point. However, itโ€™s got to become faster...", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "GitHub: ", "raw": "GitHub: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/Lightning-AI/LitServe", "resource": null, "url": null, "href": "https://github.com/Lightning-AI/LitServe", "user": null, "lang": null, "code": null, "label": null } ]
Just tried LitServe from the good folks at @LightningAI! Between llama.cpp and vLLM, there is a small gap where a few large models are not deployable! That's where LitServe comes in! LitServe is a high-throughput serving engine for AI models built on FastAPI. Yes, built on FastAPI. That's where the advantage and the issue lie. It's extremely flexible and supports multi-modality and a variety of models out of the box. But in my testing, it lags far behind in speed compared to vLLM. Also, no OpenAI API-compatible endpoint is available as of now. But as we move to multi-modal models and agents, this serves as a good starting point. However, itโ€™s got to become faster... GitHub: https://github.com/Lightning-AI/LitServe
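Editor's note: for readers who have not tried it, the sketch below follows the minimal pattern from the LitServe README around the time of this post (a `LitAPI` subclass with `setup`/`decode_request`/`predict`/`encode_response`, wrapped in a `LitServer`). Treat it as a sketch and check the current docs, since the API may have moved; the toy "model" here is just a string prefix.

```python
import litserve as ls

class EchoAPI(ls.LitAPI):
    def setup(self, device):
        # Load your real model here; kept trivial for the sketch.
        self.prefix = "echo: "

    def decode_request(self, request):
        return request["input"]

    def predict(self, x):
        return self.prefix + str(x)

    def encode_response(self, output):
        return {"output": output}

if __name__ == "__main__":
    server = ls.LitServer(EchoAPI(), accelerator="auto")
    server.run(port=8000)
```

A request would then look something like `curl -X POST http://127.0.0.1:8000/predict -H "Content-Type: application/json" -d '{"input": "hi"}'`, assuming the library's default `/predict` route.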
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 197, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/XOmdrDLp3U0jXHvs912yB.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "victor", "theQuert" ], "count": 3 }, { "reaction": "๐Ÿš€", "users": [ "Norod78" ], "count": 1 } ]
2024-08-31T13:18:26.000Z
2024-08-31T13:18:26.748Z
[]
/posts/singhsidhukuldeep/461803347660596
866
0
817816295636972
[ { "type": "text", "value": "๐Ÿ™‹๐Ÿปโ€โ™‚๏ธHey there folks,", "raw": "๐Ÿ™‹๐Ÿปโ€โ™‚๏ธHey there folks,", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "just published a demo for Salesforce's new Function Calling Model ", "raw": "just published a demo for Salesforce's new Function Calling Model ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`Salesforce/xLAM`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "Salesforce/xLAM", "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- ", "raw": "- ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Tonic/Salesforce-Xlam-7b-r", "resource": { "type": "space", "id": "Tonic/Salesforce-Xlam-7b-r", "discussionNum": null }, "url": "https://huggingface.co/spaces/Tonic/Salesforce-Xlam-7b-r", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- ", "raw": "- ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Tonic/On-Device-Function-Calling", "resource": { "type": "space", "id": "Tonic/On-Device-Function-Calling", "discussionNum": null }, "url": "https://huggingface.co/spaces/Tonic/On-Device-Function-Calling", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "just try em out, and it comes with ", "raw": "just try em out, and it comes with ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`on-device`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "on-device", "label": null }, { "type": "text", "value": "version too ! cool ! ๐Ÿš€", "raw": "version too ! cool ! ๐Ÿš€", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿ™‹๐Ÿปโ€โ™‚๏ธHey there folks, just published a demo for Salesforce's new Function Calling Model `Salesforce/xLAM` - https://huggingface.co/spaces/Tonic/Salesforce-Xlam-7b-r - https://huggingface.co/spaces/Tonic/On-Device-Function-Calling just try em out, and it comes with `on-device`version too ! cool ! ๐Ÿš€
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 310, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 }, { "reaction": "๐Ÿ”ฅ", "users": [ "Deeran" ], "count": 1 } ]
2024-08-31T06:37:37.000Z
2024-08-31T06:37:37.030Z
[]
/posts/Tonic/817816295636972
791
0
880839703733954
[ { "type": "text", "value": "new synthetic general chat dataset! meet Supernova, a dataset using prompts from UltraFeedback and responses from Llama 3.1 405b Instruct: ", "raw": "new synthetic general chat dataset! meet Supernova, a dataset using prompts from UltraFeedback and responses from Llama 3.1 405b Instruct: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/sequelbox/Supernova", "resource": { "type": "dataset", "id": "sequelbox/Supernova", "discussionNum": null }, "url": "https://huggingface.co/datasets/sequelbox/Supernova", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "new model(s) using the Supernova dataset will follow next week, along with Other Things. (One of these will be a newly updated version of Enigma, utilizing the next version of ", "raw": "new model(s) using the Supernova dataset will follow next week, along with Other Things. (One of these will be a newly updated version of Enigma, utilizing the next version of ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/sequelbox/Tachibana", "resource": { "type": "dataset", "id": "sequelbox/Tachibana", "discussionNum": null }, "url": "https://huggingface.co/datasets/sequelbox/Tachibana", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " with approximately 2x the rows!)", "raw": " with approximately 2x the rows!)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
new synthetic general chat dataset! meet Supernova, a dataset using prompts from UltraFeedback and responses from Llama 3.1 405b Instruct: https://huggingface.co/datasets/sequelbox/Supernova new model(s) using the Supernova dataset will follow next week, along with Other Things. (One of these will be a newly updated version of Enigma, utilizing the next version of https://huggingface.co/datasets/sequelbox/Tachibana with approximately 2x the rows!)
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63444f2687964b331809eb55/WvZivsvKsM_t0tBtakovK.png", "fullname": "t.d.a.g.", "name": "sequelbox", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 50, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "djuna", "kristaller486", "osanseviero" ], "count": 4 } ]
2024-08-30T20:11:29.000Z
2024-08-30T20:11:58.460Z
[]
/posts/sequelbox/880839703733954
824
0
583635727849608
[ { "type": "text", "value": "Very excited to have made the list and been invited to OpenAI DevDay 2024 at the London event 30 October! Looking forward to seeing what the future of AI dev holds, connecting with other professionals in the field, and advocating for open source AI!", "raw": "Very excited to have made the list and been invited to OpenAI DevDay 2024 at the London event 30 October! Looking forward to seeing what the future of AI dev holds, connecting with other professionals in the field, and advocating for open source AI!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://openai.com/devday/", "resource": null, "url": null, "href": "https://openai.com/devday/", "user": null, "lang": null, "code": null, "label": null } ]
Very excited to have made the list and been invited to OpenAI DevDay 2024 at the London event on 30 October! Looking forward to seeing what the future of AI dev holds, connecting with other professionals in the field, and advocating for open source AI! https://openai.com/devday/
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/656e3808d4de03a07d116850/JZh4lrjFueJZVqugjoloP.jpeg", "fullname": "Kenneth Hamilton", "name": "ZennyKenny", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 32, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-08-30T18:06:10.000Z
2024-08-30T18:06:10.577Z
[]
/posts/ZennyKenny/583635727849608
692
0
421434775993783
[ { "type": "text", "value": "๐Ÿ’พ๐Ÿง Want to know how much VRAM you will need for training your model? ๐Ÿ’พ๐Ÿง ", "raw": "๐Ÿ’พ๐Ÿง Want to know how much VRAM you will need for training your model? ๐Ÿ’พ๐Ÿง ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Now you can use this app in which you can input a torch/tensorflow summary or the parameters count and get an estimate of the required memory!", "raw": "Now you can use this app in which you can input a torch/tensorflow summary or the parameters count and get an estimate of the required memory!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Use it in: howmuchvram.com ", "raw": "Use it in: howmuchvram.com ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Also, everything is Open Source so you can contribute in repo: ", "raw": "Also, everything is Open Source so you can contribute in repo: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/AlexBodner/How_Much_VRAM", "resource": null, "url": null, "href": "https://github.com/AlexBodner/How_Much_VRAM", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Leave it a starโญ", "raw": "Leave it a starโญ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿ’พ๐Ÿง Want to know how much VRAM you will need for training your model? ๐Ÿ’พ๐Ÿง  Now you can use this app, where you can input a torch/tensorflow model summary or the parameter count and get an estimate of the required memory! Use it at: howmuchvram.com Also, everything is Open Source, so you can contribute in the repo: https://github.com/AlexBodner/How_Much_VRAM Leave it a star โญ
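Editor's note: the app's exact formula isn't shown in the post, but a common back-of-envelope estimate from the parameter count looks like the sketch below. The per-parameter byte multipliers (mixed-precision weights and gradients, Adam moments, an fp32 master copy) and the flat activation-overhead factor are assumptions, not the site's implementation.

```python
def estimate_training_vram_gib(
    n_params: float,
    weight_bytes: int = 2,         # fp16/bf16 weights
    grad_bytes: int = 2,           # fp16/bf16 gradients
    optimizer_bytes: int = 8,      # Adam first/second moments in fp32
    master_weight_bytes: int = 4,  # fp32 master copy kept by mixed-precision training
    activation_overhead: float = 1.2,  # crude allowance for activations, buffers, fragmentation
) -> float:
    per_param = weight_bytes + grad_bytes + optimizer_bytes + master_weight_bytes
    return n_params * per_param * activation_overhead / 1024**3

# Roughly 125 GiB for a 7B model under these assumptions, before any sharding or offloading.
print(f"{estimate_training_vram_gib(7e9):.0f} GiB")
```

Inference-only estimates drop the gradient and optimizer terms, which is why a 7B model fits in roughly 14 GB at fp16 or about 4-5 GB at 4-bit.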
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/658880d499ed106ac888dd7a/wMv9-ZsJUw4QQnld_cci7.jpeg", "fullname": "Alexander Dylan Bodner", "name": "AlexBodner", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 28, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "Mehyaar", "MexIvanov", "Bruhn", "den0620", "AtAndDev" ], "count": 5 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "AtAndDev" ], "count": 2 } ]
2024-08-30T17:49:25.000Z
2024-08-30T17:49:25.232Z
[]
/posts/AlexBodner/421434775993783
1,586
0
416542379891081
[ { "type": "text", "value": "Shakker-Labs brings an amazing LoRA trained on FLUX.1-dev for blended realistic illustration by Muertu ๐Ÿ˜ the front character is in illustration style, while the background is realistic. ๐Ÿคฉ", "raw": "Shakker-Labs brings an amazing LoRA trained on FLUX.1-dev for blended realistic illustration by Muertu ๐Ÿ˜ the front character is in illustration style, while the background is realistic. ๐Ÿคฉ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿค™Model: ", "raw": "๐Ÿค™Model: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration", "resource": null, "url": null, "href": "https://huggingface.co/Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ™‡โ€โ™‚๏ธMy space for demo: ", "raw": "๐Ÿ™‡โ€โ™‚๏ธMy space for demo: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/vilarin/flux-lab-light", "resource": { "type": "space", "id": "vilarin/flux-lab-light", "discussionNum": null }, "url": "https://huggingface.co/spaces/vilarin/flux-lab-light", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Shakker-Labs brings an amazing LoRA trained on FLUX.1-dev for blended realistic illustration by Muertu ๐Ÿ˜ The foreground character is rendered in illustration style, while the background is realistic. ๐Ÿคฉ ๐Ÿค™Model: https://huggingface.co/Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration ๐Ÿ™‡โ€โ™‚๏ธMy space for demo: https://huggingface.co/spaces/vilarin/flux-lab-light
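Editor's note: a hedged diffusers sketch for trying the LoRA locally. The repo ids come from the links above; the prompt, step count, and guidance value are guesses, so check the model card for recommended trigger words and settings.

```python
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.load_lora_weights("Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration")
pipe.enable_model_cpu_offload()  # helps fit the ~12B-parameter transformer on a single GPU

image = pipe(
    "a girl in flat illustration style walking down a realistic photographed city street",
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("blended_illustration.png")
```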
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/642827944fe87caede802784/a7s3Ub9Cy6-PuuaX8wwXm.png", "fullname": "VILARIN", "name": "vilarin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 68, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/642827944fe87caede802784/oz-yfW-ou3-NT1uhcL6DK.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/642827944fe87caede802784/yMXRIQ4gDlK1fPBVLZYYO.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/642827944fe87caede802784/pqpg26PSUbsrf1run2-LX.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/642827944fe87caede802784/5REmkwmEdfOz-dcinakUU.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "orrinin", "YaTharThShaRma999", "John6666", "chris-fung", "djuna", "alielfilali01", "ngxson", "Felladrin", "wanghaofan", "AtAndDev", "keakohv", "louisbrulenaudet" ], "count": 12 }, { "reaction": "โค๏ธ", "users": [ "Amr-khaled", "AtAndDev" ], "count": 2 } ]
2024-08-30T16:51:15.000Z
2024-09-05T05:33:55.982Z
[]
/posts/vilarin/416542379891081
2,448
0
580555903414737
[ { "type": "text", "value": "Made a fun Space powered by Llama 405B for creating real, working react apps with the awesome plus that you can contribute to an open react dataset by upvoting or downvoting the response ๐Ÿค—.", "raw": "Made a fun Space powered by Llama 405B for creating real, working react apps with the awesome plus that you can contribute to an open react dataset by upvoting or downvoting the response ๐Ÿค—.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/cfahlgren1/llama-artifacts", "resource": { "type": "space", "id": "cfahlgren1/llama-artifacts", "discussionNum": null }, "url": "https://huggingface.co/spaces/cfahlgren1/llama-artifacts", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/cfahlgren1/react-code-instructions", "resource": { "type": "dataset", "id": "cfahlgren1/react-code-instructions", "discussionNum": null }, "url": "https://huggingface.co/datasets/cfahlgren1/react-code-instructions", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Made a fun Space powered by Llama 405B for creating real, working React apps, with the awesome plus that you can contribute to an open React dataset by upvoting or downvoting the responses ๐Ÿค—. https://huggingface.co/spaces/cfahlgren1/llama-artifacts https://huggingface.co/datasets/cfahlgren1/react-code-instructions
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/648a374f00f7a3374ee64b99/YPwSOrronoozwHbJchPn3.jpeg", "fullname": "Caleb Fahlgren", "name": "cfahlgren1", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 103, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/648a374f00f7a3374ee64b99/PcRRDxywqQxW04zptVdp3.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 }, { "reaction": "๐Ÿ”ฅ", "users": [ "cloudjumbo" ], "count": 1 } ]
2024-08-30T15:32:53.000Z
2024-08-30T15:35:08.779Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/648a374f00f7a3374ee64b99/YPwSOrronoozwHbJchPn3.jpeg", "fullname": "Caleb Fahlgren", "name": "cfahlgren1", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 103, "isFollowing": false } ]
/posts/cfahlgren1/580555903414737
1,129
1
255000504996462
[ { "type": "text", "value": "Here's a 1-minute video tutorial on how to fine-tune ", "raw": "Here's a 1-minute video tutorial on how to fine-tune ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/unsloth/llama-3-8b-bnb-4bit", "resource": { "type": "model", "id": "unsloth/llama-3-8b-bnb-4bit", "discussionNum": null }, "url": "https://huggingface.co/unsloth/llama-3-8b-bnb-4bit", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " with unsloth", "raw": " with unsloth", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Using Roller Coaster Tycoon peep thoughts as an example", "raw": "Using Roller Coaster Tycoon peep thoughts as an example", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Here's a 1-minute video tutorial on how to fine-tune https://huggingface.co/unsloth/llama-3-8b-bnb-4bit with unsloth Using Roller Coaster Tycoon peep thoughts as an example
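Editor's note: for readers who prefer text to video, the sketch below mirrors the pattern used in the public unsloth notebooks (4-bit base model, LoRA adapters, trl's SFTTrainer). The dataset file and its "text" column are invented stand-ins for the Roller Coaster Tycoon peep-thoughts data, and argument names may drift across unsloth/trl versions.

```python
from datasets import load_dataset
from transformers import TrainingArguments
from trl import SFTTrainer
from unsloth import FastLanguageModel

# Load the 4-bit base model and attach LoRA adapters.
model, tokenizer = FastLanguageModel.from_pretrained(
    "unsloth/llama-3-8b-bnb-4bit", max_seq_length=2048, load_in_4bit=True,
)
model = FastLanguageModel.get_peft_model(
    model, r=16, lora_alpha=16,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
)

# Hypothetical JSONL with one {"text": "..."} record per peep thought.
dataset = load_dataset("json", data_files="peep_thoughts.jsonl", split="train")

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=2048,
    args=TrainingArguments(
        per_device_train_batch_size=2,
        gradient_accumulation_steps=4,
        max_steps=60,
        learning_rate=2e-4,
        output_dir="outputs",
    ),
)
trainer.train()
```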
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672164046414-624b4a964056e2a6914a05c5.png", "fullname": "Dylan Ebert", "name": "dylanebert", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1743, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/624b4a964056e2a6914a05c5/VkNapafGp7lrlLbyJU2e8.mp4" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "victor", "John6666", "prithivMLmods", "KingNish", "thisisanshgupta", "Bruhn", "budotsmedia", "AtAndDev", "mambiux" ], "count": 9 } ]
2024-08-30T15:27:11.000Z
2024-08-30T15:27:11.620Z
[]
/posts/dylanebert/255000504996462
2,514
0
917996280846812
[ { "type": "text", "value": "AI in the News: Llama 10x growth, Apple & Nvidia in talks with OpenAI, universal basic income, AI & art", "raw": "AI in the News: Llama 10x growth, Apple & Nvidia in talks with OpenAI, universal basic income, AI & art", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "* Meta leads open-source AI boom, Llama downloads surge 10x year-over-year - VB", "raw": "* Meta leads open-source AI boom, Llama downloads surge 10x year-over-year - VB", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://venturebeat.com/ai/meta-leads-open-source-ai-boom-llama-downloads-surge-10x-year-over-year/", "resource": null, "url": null, "href": "https://venturebeat.com/ai/meta-leads-open-source-ai-boom-llama-downloads-surge-10x-year-over-year/", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "* Apple, Nvidia Are in Talks to Invest in OpenAI - WSJ", "raw": "* Apple, Nvidia Are in Talks to Invest in OpenAI - WSJ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://www.wsj.com/tech/ai/openai-apple-funding-chatgpt-50754cd6?mod=rss_Technology", "resource": null, "url": null, "href": "https://www.wsj.com/tech/ai/openai-apple-funding-chatgpt-50754cd6?mod=rss_Technology", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "* The Report Card on Guaranteed Income Is Still Incomplete - NYT", "raw": "* The Report Card on Guaranteed Income Is Still Incomplete - NYT", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://www.nytimes.com/2024/08/30/business/economy/the-report-card-on-guaranteed-income-is-still-incomplete.html", "resource": null, "url": null, "href": "https://www.nytimes.com/2024/08/30/business/economy/the-report-card-on-guaranteed-income-is-still-incomplete.html", "user": 
null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "* Ethically dubious or a creative gift? How artists are grappling with AI in their work - The Guardian", "raw": "* Ethically dubious or a creative gift? How artists are grappling with AI in their work - The Guardian", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://www.theguardian.com/artanddesign/article/2024/aug/30/xanthe-dobbie-futuer-sex-love-sounds-ai-video-celebrity-clones", "resource": null, "url": null, "href": "https://www.theguardian.com/artanddesign/article/2024/aug/30/xanthe-dobbie-futuer-sex-love-sounds-ai-video-celebrity-clones", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Want more? Subscribe to my daily newsletter!", "raw": "Want more? Subscribe to my daily newsletter!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://linkedin.com/build-relation/newsletter-follow?entityUrn=7233909926606053377", "resource": null, "url": null, "href": "https://linkedin.com/build-relation/newsletter-follow?entityUrn=7233909926606053377", "user": null, "lang": null, "code": null, "label": null } ]
AI in the News: Llama 10x growth, Apple & Nvidia in talks with OpenAI, universal basic income, AI & art * Meta leads open-source AI boom, Llama downloads surge 10x year-over-year - VB https://venturebeat.com/ai/meta-leads-open-source-ai-boom-llama-downloads-surge-10x-year-over-year/ * Apple, Nvidia Are in Talks to Invest in OpenAI - WSJ https://www.wsj.com/tech/ai/openai-apple-funding-chatgpt-50754cd6?mod=rss_Technology * The Report Card on Guaranteed Income Is Still Incomplete - NYT https://www.nytimes.com/2024/08/30/business/economy/the-report-card-on-guaranteed-income-is-still-incomplete.html * Ethically dubious or a creative gift? How artists are grappling with AI in their work - The Guardian https://www.theguardian.com/artanddesign/article/2024/aug/30/xanthe-dobbie-futuer-sex-love-sounds-ai-video-celebrity-clones Want more? Subscribe to my daily newsletter! https://linkedin.com/build-relation/newsletter-follow?entityUrn=7233909926606053377
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 364, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-08-30T13:55:59.000Z
2024-08-30T13:55:59.187Z
[]
/posts/fdaudens/917996280846812
447
0
357701279407928
[ { "type": "text", "value": "Sharing for anyone using Diffusers ", "raw": "Sharing for anyone using Diffusers ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`from_single_file`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "from_single_file", "label": null }, { "type": "text", "value": " loading and affected by the Runway SD 1.5 issue.", "raw": " loading and affected by the Runway SD 1.5 issue.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "If you have ", "raw": "If you have ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`runwayml/stable-diffusion-v1-5`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "runwayml/stable-diffusion-v1-5", "label": null }, { "type": "text", "value": " saved locally in your HF cache then loading single file checkpoints in the following way should still work. ", "raw": " saved locally in your HF cache then loading single file checkpoints in the following way should still work. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "code_fence", "value": null, "raw": "```\nfrom diffusers import StableDiffusionPipeline\n\npipe = StableDiffusionPipeline.from_single_file(\"<url or path to single file checkpoint>\")\n```", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "from diffusers import StableDiffusionPipeline\n\npipe = StableDiffusionPipeline.from_single_file(\"<url or path to single file checkpoint>\")", "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "If you do not have the model repo saved in your cache, then automatically inferring the pipeline config will not work since the reference repo ", "raw": "If you do not have the model repo saved in your cache, then automatically inferring the pipeline config will not work since the reference repo ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "inline_code", "value": null, "raw": "`runwayml/stable-diffusion-v1-5`", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "runwayml/stable-diffusion-v1-5", "label": null }, { "type": "text", "value": " doesn't exist anymore. ", "raw": " doesn't exist anymore. 
", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "You can use an alternative SD1.5 repo id to still configure your pipeline.", "raw": "You can use an alternative SD1.5 repo id to still configure your pipeline.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "code_fence", "value": null, "raw": "```\nfrom diffusers import StableDiffusionPipeline\n\npipe = StableDiffusionPipeline.from_single_file(\"<url or path to single file checkpoint>\", config=\"Lykon/DreamShaper\")\n```", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "from diffusers import StableDiffusionPipeline\n\npipe = StableDiffusionPipeline.from_single_file(\"<url or path to single file checkpoint>\", config=\"Lykon/DreamShaper\")", "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "We're working on resolving the issue ASAP. ", "raw": "We're working on resolving the issue ASAP. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Sharing for anyone using Diffusers `from_single_file` loading and affected by the Runway SD 1.5 issue. If you have `runwayml/stable-diffusion-v1-5` saved locally in your HF cache then loading single file checkpoints in the following way should still work. ``` from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_single_file("<url or path to single file checkpoint>") ``` If you do not have the model repo saved in your cache, then automatically inferring the pipeline config will not work since the reference repo `runwayml/stable-diffusion-v1-5` doesn't exist anymore. You can use an alternative SD1.5 repo id to still configure your pipeline. ``` from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_single_file("<url or path to single file checkpoint>", config="Lykon/DreamShaper") ``` We're working on resolving the issue ASAP.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1630334896986-6126e46848005fa9ca5c578c.jpeg", "fullname": "Dhruv Nair", "name": "dn6", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 35, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "clem", "wardred1962", "Nymbo" ], "count": 4 }, { "reaction": "โค๏ธ", "users": [ "clem", "sayakpaul", "aaditya", "Nymbo" ], "count": 4 }, { "reaction": "๐Ÿ‘", "users": [ "John6666", "clem", "Nymbo" ], "count": 3 } ]
2024-08-30T05:39:38.000Z
2024-09-11T08:24:39.918Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 384, "isFollowing": false } ]
/posts/dn6/357701279407928
2,539
2
672926050183277
[ { "type": "text", "value": "The only 405B spaces still freely accessible are powered by SN fast api. ", "raw": "The only 405B spaces still freely accessible are powered by SN fast api. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/xianbao/SambaNova-fast", "resource": { "type": "space", "id": "xianbao/SambaNova-fast", "discussionNum": null }, "url": "https://huggingface.co/spaces/xianbao/SambaNova-fast", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://sambanova.ai/fast-api?api_ref=907266", "resource": null, "url": null, "href": "https://sambanova.ai/fast-api?api_ref=907266", "user": null, "lang": null, "code": null, "label": null } ]
The only 405B spaces still freely accessible are powered by SN fast api. https://huggingface.co/spaces/xianbao/SambaNova-fast https://sambanova.ai/fast-api?api_ref=907266
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62140dcdcf7928035e8135ad/FTiirwS_L6IaLHmHwIo2g.png", "fullname": "Kaizhao Liang", "name": "kz919", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 34, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62140dcdcf7928035e8135ad/NNrFwd6s5BM2fTpWmX3px.jpeg" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666", "clem", "AtAndDev", "kz919", "Amr-khaled", "louisbrulenaudet" ], "count": 6 }, { "reaction": "๐Ÿ”ฅ", "users": [ "kz919", "alielfilali01", "KingNish", "tousif1988" ], "count": 4 }, { "reaction": "๐Ÿค—", "users": [ "kz919", "andito" ], "count": 2 }, { "reaction": "๐Ÿ˜Ž", "users": [ "kz919" ], "count": 1 } ]
2024-08-30T03:04:03.000Z
2024-08-30T03:04:03.244Z
[]
/posts/kz919/672926050183277
1,680
0
914900735326223
[ { "type": "text", "value": "The word 'Lead' has three definitions. When an LLM model tokenizes this word, it is always the same token. Imagine being able to put any particular embedding at any particular time into a 'Quantum State'. When an Embedding is in a Quantum State, the word token could have up to 3 different meanings (x1, x2, x3). The Quantum State gets collapsed based on the individual context surrounding the word. 'Jill lead Joy to the store' would collapse to x1. 'Jill and Joy stumbled upon a pile of lead' would collapse to x3. Very simple, right? This method produces OFF THE CHARTS results:", "raw": "The word 'Lead' has three definitions. When an LLM model tokenizes this word, it is always the same token. Imagine being able to put any particular embedding at any particular time into a 'Quantum State'. When an Embedding is in a Quantum State, the word token could have up to 3 different meanings (x1, x2, x3). The Quantum State gets collapsed based on the individual context surrounding the word. 'Jill lead Joy to the store' would collapse to x1. 'Jill and Joy stumbled upon a pile of lead' would collapse to x3. Very simple, right? This method produces OFF THE CHARTS results:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/watch?v=tuQI6A-EOqE", "resource": null, "url": null, "href": "https://www.youtube.com/watch?v=tuQI6A-EOqE", "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
The word 'Lead' has three definitions. When an LLM model tokenizes this word, it is always the same token. Imagine being able to put any particular embedding at any particular time into a 'Quantum State'. When an Embedding is in a Quantum State, the word token could have up to 3 different meanings (x1, x2, x3). The Quantum State gets collapsed based on the individual context surrounding the word. 'Jill lead Joy to the store' would collapse to x1. 'Jill and Joy stumbled upon a pile of lead' would collapse to x3. Very simple, right? This method produces OFF THE CHARTS results: https://www.youtube.com/watch?v=tuQI6A-EOqE
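Editor's note: the linked video isn't transcribed here, so purely as a toy interpretation: one way to read the "quantum state" description is to keep several candidate sense vectors for an ambiguous token and "collapse" to whichever is most similar to the surrounding context. The vectors and the cosine-similarity rule below are invented for illustration and are not claimed to be the author's actual method.

```python
import numpy as np

senses = {  # hypothetical sense embeddings for the token "lead"
    "to guide":       np.array([0.9, 0.1, 0.0]),
    "first position": np.array([0.1, 0.9, 0.0]),
    "the metal":      np.array([0.0, 0.1, 0.9]),
}

def collapse(context_vec: np.ndarray) -> str:
    """Pick the sense whose vector is closest (by cosine similarity) to the context."""
    def cos(a, b):
        return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))
    return max(senses, key=lambda s: cos(senses[s], context_vec))

# A context vector leaning toward material/metal picks the third sense.
print(collapse(np.array([0.05, 0.2, 0.8])))  # -> "the metal"
```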
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 148, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿง ", "users": [ "maximuspowers", "maier-s", "nicolollo" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-08-29T21:39:19.000Z
2024-08-29T21:39:19.548Z
[]
/posts/TuringsSolutions/914900735326223
1,405
0
230212031259808
[ { "type": "text", "value": "Continuing my streak by releasing the Wikireading dataset: a large collection of scraped non-fiction books predominantly in Russian language.", "raw": "Continuing my streak by releasing the Wikireading dataset: a large collection of scraped non-fiction books predominantly in Russian language.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/its5Q/wikireading", "resource": { "type": "dataset", "id": "its5Q/wikireading", "discussionNum": null }, "url": "https://huggingface.co/datasets/its5Q/wikireading", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Here's the highlights:", "raw": "Here's the highlights:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- ~7B tokens, or ~28B characters, making it a great candidate for use in pretraining", "raw": "- ~7B tokens, or ~28B characters, making it a great candidate for use in pretraining", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Contains non-fiction works from many knowledge domains", "raw": "- Contains non-fiction works from many knowledge domains", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Includes both the original HTML and extracted text of book chapters", "raw": "- Includes both the original HTML and extracted text of book chapters", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Continuing my streak by releasing the Wikireading dataset: a large collection of scraped non-fiction books, predominantly in Russian. https://huggingface.co/datasets/its5Q/wikireading Here are the highlights: - ~7B tokens, or ~28B characters, making it a great candidate for use in pretraining - Contains non-fiction works from many knowledge domains - Includes both the original HTML and extracted text of book chapters
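Editor's note: a minimal loading sketch for the dataset; streaming a few records is a reasonable default given the size, but the split name and the actual column names are assumptions and should be checked in the dataset viewer.

```python
from datasets import load_dataset

# Stream so the ~28B-character corpus isn't downloaded up front.
ds = load_dataset("its5Q/wikireading", split="train", streaming=True)

for record in ds.take(3):
    # Print a truncated preview of each field to discover the schema.
    print({key: str(value)[:80] for key, value in record.items()})
```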
{ "avatarUrl": "/avatars/a692e2e2a3b0222e2f8cdfc44ac8d64c.svg", "fullname": "its5Q", "name": "its5Q", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 14, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "lukmanaj", "clem", "kristaller486", "nyuuzyou" ], "count": 4 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 }, { "reaction": "โค๏ธ", "users": [ "clem" ], "count": 1 } ]
2024-08-29T18:36:41.000Z
2024-08-29T18:36:41.732Z
[]
/posts/its5Q/230212031259808
1,276
0
672761214253429
[ { "type": "text", "value": "Thought this was an interesting graphic from the EAGLE blog post. It made me wonder if certain sampling methods have been shown to work better for certain tasks.", "raw": "Thought this was an interesting graphic from the EAGLE blog post. It made me wonder if certain sampling methods have been shown to work better for certain tasks.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Does anyone know of any work looking at trends in the output token probability distribution by task type? (or similar) ", "raw": "Does anyone know of any work looking at trends in the output token probability distribution by task type? (or similar) ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Source: ", "raw": "Source: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://sites.google.com/view/eagle-llm", "resource": null, "url": null, "href": "https://sites.google.com/view/eagle-llm", "user": null, "lang": null, "code": null, "label": null } ]
Thought this was an interesting graphic from the EAGLE blog post. It made me wonder if certain sampling methods have been shown to work better for certain tasks. Does anyone know of any work looking at trends in the output token probability distribution by task type? (or similar) Source: https://sites.google.com/view/eagle-llm
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/651d4e73acd8e9168ac92b04/WMYCWKx9MM8Xxj8vXursD.png", "fullname": "Jonah Ramponi", "name": "jonah-ramponi", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/651d4e73acd8e9168ac92b04/775TUAesRzcshWIVKmo_G.png" } ]
[]
[ { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-08-29T18:06:07.000Z
2024-08-29T18:06:44.888Z
[]
/posts/jonah-ramponi/672761214253429
496
0
858442795091051
[ { "type": "text", "value": "Automated web scraping with playwright is becoming easier by the day. Now, using ollama tool calling, its possible to perform very high accuracy web scraping (in some cases 100% accurate) through just asking an LLM to scrape the content for you. ", "raw": "Automated web scraping with playwright is becoming easier by the day. Now, using ollama tool calling, its possible to perform very high accuracy web scraping (in some cases 100% accurate) through just asking an LLM to scrape the content for you. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "This can be completed in a multistep process similar to cohere's platform. If you have tried the cohere playground with web scraping, this will feel very similar. In my experience, the Llama 3.1 version is much better due to the larger context window. Both tools are great, but the difference is the ollama + playwright version is completely controlled by you. ", "raw": "This can be completed in a multistep process similar to cohere's platform. If you have tried the cohere playground with web scraping, this will feel very similar. In my experience, the Llama 3.1 version is much better due to the larger context window. Both tools are great, but the difference is the ollama + playwright version is completely controlled by you. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "All you need to do is wrap your scraper in a function:", "raw": "All you need to do is wrap your scraper in a function:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "code_fence", "value": null, "raw": "```\n async def query_web_scraper(url: str) -> dict:\n scraper = WebScraper(headless=False)\n return await scraper.query_page_content(url)\n```", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": " async def query_web_scraper(url: str) -> dict:\n scraper = WebScraper(headless=False)\n return await scraper.query_page_content(url)", "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "and then make your request:", "raw": "and then make your request:", "resource": null, "url": null, 
"href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "code_fence", "value": null, "raw": "```\n# First API call: Send the query and function description to the model\nresponse = ollama.chat(\n model=model,\n messages=messages,\n tools=[\n {\n 'type': 'function',\n 'function': {\n 'name': 'query_web_scraper',\n 'description': 'Scrapes the content of a web page and returns the structured JSON object with titles, articles, and associated links.',\n 'parameters': {\n 'type': 'object',\n 'properties': {\n 'url': {\n 'type': 'string',\n 'description': 'The URL of the web page to scrape.',\n },\n },\n 'required': ['url'],\n },\n },\n },\n ]\n)\n```", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": "# First API call: Send the query and function description to the model\nresponse = ollama.chat(\n model=model,\n messages=messages,\n tools=[\n {\n 'type': 'function',\n 'function': {\n 'name': 'query_web_scraper',\n 'description': 'Scrapes the content of a web page and returns the structured JSON object with titles, articles, and associated links.',\n 'parameters': {\n 'type': 'object',\n 'properties': {\n 'url': {\n 'type': 'string',\n 'description': 'The URL of the web page to scrape.',\n },\n },\n 'required': ['url'],\n },\n },\n },\n ]\n)", "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "To learn more:", "raw": "To learn more:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Github w/ Playground: ", "raw": "Github w/ Playground: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/tdolan21/tool-calling-playground/blob/main/notebooks/ollama-playwright-web-scraping.ipynb", "resource": null, "url": null, "href": "https://github.com/tdolan21/tool-calling-playground/blob/main/notebooks/ollama-playwright-web-scraping.ipynb", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Complete Guide: ", "raw": "Complete Guide: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://medium.com/@tdolan21/building-an-llm-powered-web-scraper-with-ollama-and-playwright-6274d5d938b5", "resource": null, "url": null, "href": "https://medium.com/@tdolan21/building-an-llm-powered-web-scraper-with-ollama-and-playwright-6274d5d938b5", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, 
"raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Automated web scraping with playwright is becoming easier by the day. Now, using ollama tool calling, its possible to perform very high accuracy web scraping (in some cases 100% accurate) through just asking an LLM to scrape the content for you. This can be completed in a multistep process similar to cohere's platform. If you have tried the cohere playground with web scraping, this will feel very similar. In my experience, the Llama 3.1 version is much better due to the larger context window. Both tools are great, but the difference is the ollama + playwright version is completely controlled by you. All you need to do is wrap your scraper in a function: ``` async def query_web_scraper(url: str) -> dict: scraper = WebScraper(headless=False) return await scraper.query_page_content(url) ``` and then make your request: ``` # First API call: Send the query and function description to the model response = ollama.chat( model=model, messages=messages, tools=[ { 'type': 'function', 'function': { 'name': 'query_web_scraper', 'description': 'Scrapes the content of a web page and returns the structured JSON object with titles, articles, and associated links.', 'parameters': { 'type': 'object', 'properties': { 'url': { 'type': 'string', 'description': 'The URL of the web page to scrape.', }, }, 'required': ['url'], }, }, }, ] ) ``` To learn more: Github w/ Playground: https://github.com/tdolan21/tool-calling-playground/blob/main/notebooks/ollama-playwright-web-scraping.ipynb Complete Guide: https://medium.com/@tdolan21/building-an-llm-powered-web-scraper-with-ollama-and-playwright-6274d5d938b5
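A hedged sketch of the step the post stops short of: executing the tool call that the model returns and feeding the scraped result back for a final answer. It reuses `query_web_scraper`, `messages`, `model`, and the first `ollama.chat` response from the snippets above; dict-style access to the response matches older `ollama` Python releases, while newer versions return typed objects, so adjust the accessors to your installed version.

```python
# Follow-up sketch (assumes `response`, `messages`, `model`, and `query_web_scraper` from the
# snippets above). It executes the requested tool call, appends the scraped JSON as a 'tool'
# message, then asks the model for its final answer.
import json
import ollama

async def answer_with_scraped_content(response, messages, model):
    messages.append(response['message'])  # keep the assistant turn that requested the tool
    for call in response['message'].get('tool_calls') or []:
        if call['function']['name'] == 'query_web_scraper':
            args = call['function']['arguments']            # already parsed into a dict
            scraped = await query_web_scraper(args['url'])  # the async wrapper defined above
            messages.append({'role': 'tool', 'content': json.dumps(scraped)})
    final = ollama.chat(model=model, messages=messages)     # second call: no tools needed
    return final['message']['content']

# e.g. answer = asyncio.run(answer_with_scraped_content(response, messages, model))
```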
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6455cc8d679315e4ef16fbec/M6Cfifn05BUzkCFd2QDIT.png", "fullname": "Tim Dolan", "name": "macadeliccc", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 152, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6455cc8d679315e4ef16fbec/hVNJY2mBa3mNtCXWFGaKf.mp4" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "RalFinger", "xsa-dev", "wsuff", "alielfilali01", "Bruhn" ], "count": 5 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "alielfilali01", "louisbrulenaudet" ], "count": 3 } ]
2024-08-29T16:24:10.000Z
2024-08-29T18:52:11.663Z
[]
/posts/macadeliccc/858442795091051
1,576
0
676807179049479
[ { "type": "text", "value": "Simplified implementation of โ€œNeural Networks are Decision Treesโ€.", "raw": "Simplified implementation of โ€œNeural Networks are Decision Treesโ€.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Showing that any neural network with any activation function can be represented as a decision tree. Since decision trees are inherently interpretable, their equivalence helps us understand how the network makes decisions.", "raw": "Showing that any neural network with any activation function can be represented as a decision tree. Since decision trees are inherently interpretable, their equivalence helps us understand how the network makes decisions.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "In this implementation, we trained a simple neural network for 1k epochs on makemoons, saved the trained weights (state dicts), extracted the decision tree equivalent from the trained weight then visualize and evaluate.", "raw": "In this implementation, we trained a simple neural network for 1k epochs on makemoons, saved the trained weights (state dicts), extracted the decision tree equivalent from the trained weight then visualize and evaluate.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Code: ", "raw": "Code: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/Jaykef/ai-algorithms/blob/main/nns_are%20decision_trees.ipynb", "resource": null, "url": null, "href": "https://github.com/Jaykef/ai-algorithms/blob/main/nns_are%20decision_trees.ipynb", "user": null, "lang": null, "code": null, "label": null } ]
Simplified implementation of โ€œNeural Networks are Decision Treesโ€. Showing that any neural network with any activation function can be represented as a decision tree. Since decision trees are inherently interpretable, their equivalence helps us understand how the network makes decisions. In this implementation, we trained a simple neural network for 1k epochs on makemoons, saved the trained weights (state dicts), extracted the decision tree equivalent from the trained weight then visualize and evaluate. Code: https://github.com/Jaykef/ai-algorithms/blob/main/nns_are%20decision_trees.ipynb
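As a rough illustration of the core idea (a sketch under simple assumptions, not the notebook's exact code): for a ReLU network, each input's on/off activation pattern plays the role of a root-to-leaf path, and every input sharing a pattern is classified by one and the same affine rule.

```python
# Sketch: train a tiny ReLU MLP on make_moons, then count the distinct hidden activation
# patterns. Within one pattern the network is a single affine function, which is what makes
# the decision-tree view possible. Hyperparameters are illustrative, not the notebook's.
import torch
import torch.nn as nn
from sklearn.datasets import make_moons

X, y = make_moons(n_samples=1000, noise=0.1, random_state=0)
X_t = torch.tensor(X, dtype=torch.float32)
y_t = torch.tensor(y, dtype=torch.float32)

net = nn.Sequential(nn.Linear(2, 8), nn.ReLU(), nn.Linear(8, 1))
opt = torch.optim.Adam(net.parameters(), lr=1e-2)
loss_fn = nn.BCEWithLogitsLoss()
for _ in range(1000):  # 1k full-batch epochs, mirroring the post
    opt.zero_grad()
    loss = loss_fn(net(X_t).squeeze(-1), y_t)
    loss.backward()
    opt.step()

with torch.no_grad():
    hidden_pre = net[0](X_t)               # pre-activations of the hidden layer
    patterns = (hidden_pre > 0).int()      # each row acts like a root-to-leaf path
    leaves = torch.unique(patterns, dim=0)
print(f"{len(leaves)} distinct activation patterns ('leaves') cover {len(X)} points")
```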
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg", "fullname": "Jaward Sesay", "name": "Jaward", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 189, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/6nh24JDkHq7mrrImKoPkz.mp4" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/DIk59sBCsWQKZsHIPREMA.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/hMMlU3QzSO7ELzoU0Kre2.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/2YXz5KFV8-OAhdreRFciY.png" } ]
[]
[ { "reaction": "๐Ÿง ", "users": [ "prithivMLmods", "John6666", "maier-s", "jsulz", "AtAndDev" ], "count": 5 }, { "reaction": "๐Ÿ”ฅ", "users": [ "rajveer43", "AtAndDev" ], "count": 2 } ]
2024-08-29T14:44:28.000Z
2024-08-29T21:52:51.847Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6316fb937b0ee0136e5f1220/poHBoJ7QAF_s2CCaosdvQ.jpeg", "fullname": "Firstname Lastname", "name": "takeraparterer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 29, "isFollowing": false } ]
/posts/Jaward/676807179049479
1,327
1
325819090005748
[ { "type": "mention", "value": null, "raw": "@victor", "resource": null, "url": null, "href": null, "user": "victor", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Excuse me.", "raw": "Excuse me.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I would like to report the following bug or new specification that is probably the cause of the fatal stacks that are occurring in the Zero GPU space throughout HF.", "raw": "I would like to report the following bug or new specification that is probably the cause of the fatal stacks that are occurring in the Zero GPU space throughout HF.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Thanks.", "raw": "Thanks.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/zero-gpu-explorers/README/discussions/104", "resource": { "type": "space", "id": "zero-gpu-explorers/README", "discussionNum": 104 }, "url": "https://huggingface.co/spaces/zero-gpu-explorers/README/discussions/104", "href": null, "user": null, "lang": null, "code": null, "label": null } ]
@victor Excuse me. I would like to report the following bug or new specification that is probably the cause of the fatal stacks that are occurring in the Zero GPU space throughout HF. Thanks. https://huggingface.co/spaces/zero-gpu-explorers/README/discussions/104
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 384, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2578 } ]
[ { "reaction": "๐Ÿ‘€", "users": [ "victor" ], "count": 1 } ]
2024-08-29T13:56:20.000Z
2024-09-06T14:59:07.538Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2578, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 384, "isFollowing": false } ]
/posts/John6666/325819090005748
2,798
3
396620243063150
[ { "type": "text", "value": "NVIDIA just dropped NVEagle ๐Ÿฆ…", "raw": "NVIDIA just dropped NVEagle ๐Ÿฆ…", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Super impressive vision language model that comes in 7B, 13B and 13B fine-tuned on chat ๐Ÿ’ฌ", "raw": "Super impressive vision language model that comes in 7B, 13B and 13B fine-tuned on chat ๐Ÿ’ฌ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Model repositories: ", "raw": "Model repositories: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/merve/nveagle-66d0705108582d73bb235c26", "resource": { "type": "collection", "id": "merve/nveagle-66d0705108582d73bb235c26", "discussionNum": null }, "url": "https://huggingface.co/collections/merve/nveagle-66d0705108582d73bb235c26", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Try it: ", "raw": "Try it: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/NVEagle/Eagle-X5-13B-Chat", "resource": { "type": "space", "id": "NVEagle/Eagle-X5-13B-Chat", "discussionNum": null }, "url": "https://huggingface.co/spaces/NVEagle/Eagle-X5-13B-Chat", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " ๐Ÿ’ฌ (works very well! ๐Ÿคฏ)", "raw": " ๐Ÿ’ฌ (works very well! ๐Ÿคฏ)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "This model essentially explores having different experts (MoE) for image encoder part of vision language model. ", "raw": "This model essentially explores having different experts (MoE) for image encoder part of vision language model. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "How? ๐Ÿง", "raw": "How? 
๐Ÿง", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "The authors concatenate the vision encoder output tokens together, and they apply \"pre-alignment\" essentially fine-tune experts with frozen text encoder. ", "raw": "The authors concatenate the vision encoder output tokens together, and they apply \"pre-alignment\" essentially fine-tune experts with frozen text encoder. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Then they freeze both experts and the decoder and just train the projection layer, and finally, they unfreeze everything for supervised fine-tuning โœจ", "raw": "Then they freeze both experts and the decoder and just train the projection layer, and finally, they unfreeze everything for supervised fine-tuning โœจ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "In the paper, they explore different fusion strategies and vision encoders, extending basic CLIP encoder, and figure out simply concatenating visual tokens works well.", "raw": "In the paper, they explore different fusion strategies and vision encoders, extending basic CLIP encoder, and figure out simply concatenating visual tokens works well.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Rest of the architecture is quite similar to LLaVA. (see below the architecture) ", "raw": "Rest of the architecture is quite similar to LLaVA. (see below the architecture) ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
NVIDIA just dropped NVEagle ๐Ÿฆ… Super impressive vision language model that comes in 7B, 13B and 13B fine-tuned on chat ๐Ÿ’ฌ Model repositories: https://huggingface.co/collections/merve/nveagle-66d0705108582d73bb235c26 Try it: https://huggingface.co/spaces/NVEagle/Eagle-X5-13B-Chat ๐Ÿ’ฌ (works very well! ๐Ÿคฏ) This model essentially explores having different experts (MoE) for image encoder part of vision language model. How? ๐Ÿง The authors concatenate the vision encoder output tokens together, and they apply "pre-alignment" essentially fine-tune experts with frozen text encoder. Then they freeze both experts and the decoder and just train the projection layer, and finally, they unfreeze everything for supervised fine-tuning โœจ In the paper, they explore different fusion strategies and vision encoders, extending basic CLIP encoder, and figure out simply concatenating visual tokens works well. Rest of the architecture is quite similar to LLaVA. (see below the architecture)
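A schematic sketch of the fusion idea described above, with made-up dimensions and stand-in "experts" (the real model uses encoders such as CLIP and other vision backbones): run each expert on the same visual input, concatenate their per-token features, and project into the language model's embedding space. This is one illustrative reading of "simply concatenating visual tokens", not the released implementation.

```python
# Toy fusion module: several vision "experts" (stand-ins for real encoders), per-token feature
# concatenation, and an MLP projection into the LLM embedding space. All sizes are placeholders.
import torch
import torch.nn as nn

class ToyEagleFusion(nn.Module):
    def __init__(self, expert_dims=(768, 1024), patch_dim=3 * 16 * 16, llm_dim=4096):
        super().__init__()
        self.experts = nn.ModuleList([nn.Linear(patch_dim, d) for d in expert_dims])
        self.projection = nn.Sequential(
            nn.Linear(sum(expert_dims), llm_dim), nn.GELU(), nn.Linear(llm_dim, llm_dim)
        )

    def forward(self, patches):                      # patches: (batch, num_tokens, patch_dim)
        feats = [expert(patches) for expert in self.experts]
        fused = torch.cat(feats, dim=-1)             # concatenate features token by token
        return self.projection(fused)                # (batch, num_tokens, llm_dim) visual tokens

visual_tokens = ToyEagleFusion()(torch.randn(2, 576, 3 * 16 * 16))
print(visual_tokens.shape)  # torch.Size([2, 576, 4096])
```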
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5520, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/jOo9XXc-t6CNA07Aca83n.png" } ]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "sagar007", "Kukedlc", "John6666", "khanhnamle1994", "maximuspowers", "alielfilali01", "Arakinas", "darkzbaron", "damerajee", "Tahahah", "andito", "noobmldude" ], "count": 12 }, { "reaction": "๐Ÿš€", "users": [ "damerajee", "Tahahah", "louisbrulenaudet", "andito" ], "count": 4 } ]
2024-08-29T13:28:54.000Z
2024-08-29T13:28:54.254Z
[]
/posts/merve/396620243063150
2,364
0
644265186134493
[ { "type": "text", "value": " ๐Ÿ“ซ A packed AI in the News edition today!", "raw": " ๐Ÿ“ซ A packed AI in the News edition today!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ“‰ Nvidia Revenue Jumps 122% in Positive Sign for Tech's A.I. Boom - NYT", "raw": "๐Ÿ“‰ Nvidia Revenue Jumps 122% in Positive Sign for Tech's A.I. Boom - NYT", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- $30.04 billion revenue, $16.95 billion net income (up from $6.19 billion a year ago)", "raw": "- $30.04 billion revenue, $16.95 billion net income (up from $6.19 billion a year ago)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Shares in the company fell by as much as 7% in after-hours trading", "raw": "- Shares in the company fell by as much as 7% in after-hours trading", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Nvidia faces production challenges with its new Blackwell chip and growing competition, including from its own customers", "raw": "- Nvidia faces production challenges with its new Blackwell chip and growing competition, including from its own customers", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Spending on data centers and energy costs to support A.I. is expected to be $1 trillion", "raw": "- Spending on data centers and energy costs to support A.I. 
is expected to be $1 trillion", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ‘‰ ", "raw": "๐Ÿ‘‰ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://www.nytimes.com/2024/08/28/technology/nvidia-earnings-ai-stocks.html", "resource": null, "url": null, "href": "https://www.nytimes.com/2024/08/28/technology/nvidia-earnings-ai-stocks.html", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ›๏ธ California Legislature Approves Bill Proposing Sweeping A.I. Restrictions - NYT", "raw": "๐Ÿ›๏ธ California Legislature Approves Bill Proposing Sweeping A.I. Restrictions - NYT", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Bill S.B. 1047 would require AI companies to test their systems for safety before public release and allow the state attorney general to sue for serious harms caused by AI.", "raw": "- Bill S.B. 1047 would require AI companies to test their systems for safety before public release and allow the state attorney general to sue for serious harms caused by AI.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Supporters argue itโ€™s necessary to mitigate AI risks, while critics worry itโ€™s excessively focused on catastrophic harms and could jeopardize open-source AI development.", "raw": "- Supporters argue itโ€™s necessary to mitigate AI risks, while critics worry itโ€™s excessively focused on catastrophic harms and could jeopardize open-source AI development.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Governor Gavin Newsom has until September 30 to decide on the bill, which could set a national standard for AI regulation if signed into law.", "raw": "- Governor Gavin Newsom has until September 30 to decide on the bill, which could set a national standard for AI regulation if signed into law.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ‘‰ ", "raw": "๐Ÿ‘‰ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, 
"label": null }, { "type": "link", "value": null, "raw": "https://www.nytimes.com/2024/08/28/technology/california-ai-safety-bill.html", "resource": null, "url": null, "href": "https://www.nytimes.com/2024/08/28/technology/california-ai-safety-bill.html", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿง‘โ€๐Ÿซ Generative AI Transformed English Homework. Math Is Next", "raw": "๐Ÿง‘โ€๐Ÿซ Generative AI Transformed English Homework. Math Is Next", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Gauth app, which can solve math problems from photos, has millions of downloads", "raw": "- Gauth app, which can solve math problems from photos, has millions of downloads", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Got a low B in high-school level algebra and geometry in tests by Wired. \"Likely good enough to satisfy bored students who'd rather spend their time after school doing literally anything else.\"", "raw": "- Got a low B in high-school level algebra and geometry in tests by Wired. \"Likely good enough to satisfy bored students who'd rather spend their time after school doing literally anything else.\"", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- The rise of such AI tools challenges educators to rethink their approach to math homework and teaching methods, possibly leading to a shift towards more in-class practice and personalized learning.", "raw": "- The rise of such AI tools challenges educators to rethink their approach to math homework and teaching methods, possibly leading to a shift towards more in-class practice and personalized learning.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ‘‰ ", "raw": "๐Ÿ‘‰ ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://www.wired.com/story/gauth-ai-math-homework-app/", "resource": null, "url": null, "href": "https://www.wired.com/story/gauth-ai-math-homework-app/", "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿ“ซ A packed AI in the News edition today! ๐Ÿ“‰ Nvidia Revenue Jumps 122% in Positive Sign for Tech's A.I. Boom - NYT - $30.04 billion revenue, $16.95 billion net income (up from $6.19 billion a year ago) - Shares in the company fell by as much as 7% in after-hours trading - Nvidia faces production challenges with its new Blackwell chip and growing competition, including from its own customers - Spending on data centers and energy costs to support A.I. is expected to be $1 trillion ๐Ÿ‘‰ https://www.nytimes.com/2024/08/28/technology/nvidia-earnings-ai-stocks.html ๐Ÿ›๏ธ California Legislature Approves Bill Proposing Sweeping A.I. Restrictions - NYT - Bill S.B. 1047 would require AI companies to test their systems for safety before public release and allow the state attorney general to sue for serious harms caused by AI. - Supporters argue itโ€™s necessary to mitigate AI risks, while critics worry itโ€™s excessively focused on catastrophic harms and could jeopardize open-source AI development. - Governor Gavin Newsom has until September 30 to decide on the bill, which could set a national standard for AI regulation if signed into law. ๐Ÿ‘‰ https://www.nytimes.com/2024/08/28/technology/california-ai-safety-bill.html ๐Ÿง‘โ€๐Ÿซ Generative AI Transformed English Homework. Math Is Next - Gauth app, which can solve math problems from photos, has millions of downloads - Got a low B in high-school level algebra and geometry in tests by Wired. "Likely good enough to satisfy bored students who'd rather spend their time after school doing literally anything else." - The rise of such AI tools challenges educators to rethink their approach to math homework and teaching methods, possibly leading to a shift towards more in-class practice and personalized learning. ๐Ÿ‘‰ https://www.wired.com/story/gauth-ai-math-homework-app/
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 364, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "victor", "John6666", "JackCloudman", "louisbrulenaudet" ], "count": 4 } ]
2024-08-29T13:16:18.000Z
2024-08-29T13:16:18.419Z
[]
/posts/fdaudens/644265186134493
866
0
304767116384171
[ { "type": "text", "value": "Very cool to see more and more amazing startups like ", "raw": "Very cool to see more and more amazing startups like ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/PrunaAI", "resource": null, "url": null, "href": "https://huggingface.co/PrunaAI", "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " relying on Hugging Face to get more visibility, distribution and usage!", "raw": " relying on Hugging Face to get more visibility, distribution and usage!", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Very cool to see more and more amazing startups like https://huggingface.co/PrunaAI relying on Hugging Face to get more visibility, distribution and usage!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem ๐Ÿค—", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1734, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5e67bdd61009063689407479/uq-XlINaBQX1FO3gzjCSZ.png" } ]
[]
[ { "reaction": "โค๏ธ", "users": [ "alielfilali01", "Omarito2412", "louisbrulenaudet" ], "count": 3 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "victor" ], "count": 2 }, { "reaction": "๐Ÿค—", "users": [ "jnh-ordbogen", "prithivMLmods" ], "count": 2 } ]
2024-08-29T11:39:47.000Z
2024-09-19T01:55:36.798Z
[ { "avatarUrl": "/avatars/2f7a1cfc68e6f5c0a7ddb323d2ffd252.svg", "fullname": "Mads", "name": "mhenrichsen", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 41, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem ๐Ÿค—", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1734, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659336880158-6273f303f6d63a28483fde12.png", "fullname": "Lucain Pouget", "name": "Wauplin", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 156, "isFollowing": false }, { "avatarUrl": "/avatars/744eddaa7dfc34a57df9ce32a78059a0.svg", "fullname": "Tyrone Pierce", "name": "piercyy", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/clem/304767116384171
1,784
7
981999224157727
[ { "type": "text", "value": "๐Ÿ’ฌ ๐Ÿ‡ฎ๐Ÿ‡น Phi 3.5 mini ITA: a Small Language Model for Italian", "raw": "๐Ÿ’ฌ ๐Ÿ‡ฎ๐Ÿ‡น Phi 3.5 mini ITA: a Small Language Model for Italian", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Lately, I've spent some time fine-tuning language models.", "raw": "Lately, I've spent some time fine-tuning language models.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Now I am happy to release Phi 3.5 mini ITA: a fine-tuned version of Phi-3.5-mini-instruct to improve performance on the Italian language", "raw": "Now I am happy to release Phi 3.5 mini ITA: a fine-tuned version of Phi-3.5-mini-instruct to improve performance on the Italian language", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ”น Small (3.82 B parameters) but capable model", "raw": "๐Ÿ”น Small (3.82 B parameters) but capable model", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ”น 128k context length", "raw": "๐Ÿ”น 128k context length", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Chat with it on ๐Ÿค— Spaces: ", "raw": "Chat with it on ๐Ÿค— Spaces: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/anakin87/Phi-3.5-mini-ITA", "resource": { "type": "space", "id": "anakin87/Phi-3.5-mini-ITA", "discussionNum": null }, "url": "https://huggingface.co/spaces/anakin87/Phi-3.5-mini-ITA", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Model card: ", "raw": "Model card: ", "resource": null, 
"url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/anakin87/Phi-3.5-mini-ITA", "resource": { "type": "model", "id": "anakin87/Phi-3.5-mini-ITA", "discussionNum": null }, "url": "https://huggingface.co/anakin87/Phi-3.5-mini-ITA", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ—ƒ๏ธ Data", "raw": "๐Ÿ—ƒ๏ธ Data", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Supervised fine-tuning using a good mix of English and Italian data:", "raw": "Supervised fine-tuning using a good mix of English and Italian data:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- ", "raw": "- ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/mlabonne/FineTome-100k", "resource": { "type": "dataset", "id": "mlabonne/FineTome-100k", "discussionNum": null }, "url": "https://huggingface.co/datasets/mlabonne/FineTome-100k", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " by ", "raw": " by ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@mlabonne", "resource": null, "url": null, "href": null, "user": "mlabonne", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- ", "raw": "- ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/efederici/capybara-claude-15k-ita", "resource": { "type": "dataset", "id": "efederici/capybara-claude-15k-ita", "discussionNum": null }, "url": "https://huggingface.co/datasets/efederici/capybara-claude-15k-ita", "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " by ", "raw": " by ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "mention", "value": null, "raw": "@efederici", "resource": null, "url": null, "href": null, "user": "efederici", "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { 
"type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ™ Thanks to the authors for the datasets.", "raw": "๐Ÿ™ Thanks to the authors for the datasets.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸŽฏ Targeted training with Spectrum", "raw": "๐ŸŽฏ Targeted training with Spectrum", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I used Spectrum, a relatively new technique for parameter-efficient learning.", "raw": "I used Spectrum, a relatively new technique for parameter-efficient learning.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "The idea is to train only the layers of the model with high Signal-to-Noise Ratio (SNR) and โ„๏ธ freeze the rest.", "raw": "The idea is to train only the layers of the model with high Signal-to-Noise Ratio (SNR) and โ„๏ธ freeze the rest.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "I trained the top 30% of model layers.", "raw": "I trained the top 30% of model layers.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ“ Spectrum paper: ", "raw": "๐Ÿ“ Spectrum paper: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2406.06623", "resource": null, "url": null, "href": "https://arxiv.org/abs/2406.06623", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, 
"lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ“Š Vibe check and performance on Italian benchmarks seem encouraging", "raw": "๐Ÿ“Š Vibe check and performance on Italian benchmarks seem encouraging", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿ’ฌ ๐Ÿ‡ฎ๐Ÿ‡น Phi 3.5 mini ITA: a Small Language Model for Italian Lately, I've spent some time fine-tuning language models. Now I am happy to release Phi 3.5 mini ITA: a fine-tuned version of Phi-3.5-mini-instruct to improve performance on the Italian language ๐Ÿ”น Small (3.82 B parameters) but capable model ๐Ÿ”น 128k context length Chat with it on ๐Ÿค— Spaces: https://huggingface.co/spaces/anakin87/Phi-3.5-mini-ITA Model card: https://huggingface.co/anakin87/Phi-3.5-mini-ITA ๐Ÿ—ƒ๏ธ Data Supervised fine-tuning using a good mix of English and Italian data: - https://huggingface.co/datasets/mlabonne/FineTome-100k by @mlabonne - https://huggingface.co/datasets/efederici/capybara-claude-15k-ita by @efederici ๐Ÿ™ Thanks to the authors for the datasets. ๐ŸŽฏ Targeted training with Spectrum I used Spectrum, a relatively new technique for parameter-efficient learning. The idea is to train only the layers of the model with high Signal-to-Noise Ratio (SNR) and โ„๏ธ freeze the rest. I trained the top 30% of model layers. ๐Ÿ“ Spectrum paper: https://arxiv.org/abs/2406.06623 ๐Ÿ“Š Vibe check and performance on Italian benchmarks seem encouraging
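A simplified sketch of what Spectrum-style targeted training looks like in practice: freeze every parameter except the modules you decide to train. The real Spectrum tooling ranks weight matrices by signal-to-noise ratio to pick that set; the layer range below is a placeholder, not the selection actually used for Phi 3.5 mini ITA.

```python
# Sketch: freeze all parameters except a chosen subset of decoder layers (placeholder choice).
# Spectrum itself derives the trainable set from per-matrix SNR measurements.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-mini-instruct")

# Hypothetical "top ~30%" of a 32-layer decoder
trainable_patterns = [f"model.layers.{i}." for i in range(23, 32)]

for name, param in model.named_parameters():
    param.requires_grad = any(pattern in name for pattern in trainable_patterns)

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"training {trainable / total:.1%} of {total / 1e9:.2f}B parameters")
```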
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626505d493e0b04d75710566/9rfJc9ORXU9J5a42Ev3v6.png", "fullname": "Stefano Fiorucci", "name": "anakin87", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 66, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/626505d493e0b04d75710566/juN6pOZ4lyQrdjKS4eU1P.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/626505d493e0b04d75710566/Tkkij8hfXgg65vtkjBdiV.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/612246596d9ce900691744d2/9DlHVQDqblKz7QPTA6nDa.jpeg", "fullname": "Edoardo Federici", "name": "efederici", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 27 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b8e2ba285851687028d395/JtUGAwVh_4cDEsjNcfpye.png", "fullname": "Maxime Labonne", "name": "mlabonne", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 3452 } ]
[ { "reaction": "๐Ÿ‘", "users": [ "mlabonne", "John6666", "osanseviero", "victor", "merve", "kristaller486" ], "count": 6 }, { "reaction": "๐Ÿ”ฅ", "users": [ "lukecage", "4rtemi5" ], "count": 2 } ]
2024-08-29T10:17:17.000Z
2024-08-29T10:30:16.856Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626505d493e0b04d75710566/9rfJc9ORXU9J5a42Ev3v6.png", "fullname": "Stefano Fiorucci", "name": "anakin87", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 66, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b8e2ba285851687028d395/JtUGAwVh_4cDEsjNcfpye.png", "fullname": "Maxime Labonne", "name": "mlabonne", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 3452, "isFollowing": false } ]
/posts/anakin87/981999224157727
1,618
2
762704167945404
[ { "type": "text", "value": "1. **Overview**", "raw": "1. **Overview**", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "\"EveryText\" is at the forefront of AI image generation, offering a novel \"TBF ('Text by Font') Image Model\" that enables the representation of all languages globally in AI-generated images without prior training.", "raw": "\"EveryText\" is at the forefront of AI image generation, offering a novel \"TBF ('Text by Font') Image Model\" that enables the representation of all languages globally in AI-generated images without prior training.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "2. **Background**", "raw": "2. **Background**", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Platforms like MidJourneyV6 and FLUX have advanced AI image generation, typically supporting English text. Alibaba Group expanded this to include Chinese, Japanese, and Korean, signaling a shift towards global language support.", "raw": "Platforms like MidJourneyV6 and FLUX have advanced AI image generation, typically supporting English text. Alibaba Group expanded this to include Chinese, Japanese, and Korean, signaling a shift towards global language support.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "3. **Challenges**", "raw": "3. **Challenges**", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Existing methods faced several challenges including the need for additional editing, dependency on specific training, and substantial resource requirements. These approaches also struggled with limited vocabulary and were primarily effective only for English.", "raw": "Existing methods faced several challenges including the need for additional editing, dependency on specific training, and substantial resource requirements. 
These approaches also struggled with limited vocabulary and were primarily effective only for English.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "4. **Innovative Solution**", "raw": "4. **Innovative Solution**", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "EveryText utilizes \"Fonts\" as pre-trained models, allowing any text to be visually represented without traditional training. This approach not only enhances diversity and aesthetics by utilizing various fonts but also ensures unlimited expression.", "raw": "EveryText utilizes \"Fonts\" as pre-trained models, allowing any text to be visually represented without traditional training. This approach not only enhances diversity and aesthetics by utilizing various fonts but also ensures unlimited expression.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "5. **Using the Service**", "raw": "5. 
**Using the Service**", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "EveryText is free and easy to use:", "raw": "EveryText is free and easy to use:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- **Prompt**: Describe the image.", "raw": "- **Prompt**: Describe the image.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- **Text for Image Generation**: Add your text.", "raw": "- **Text for Image Generation**: Add your text.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- **Text Position and Size**: Customize the text's placement and size.", "raw": "- **Text Position and Size**: Customize the text's placement and size.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- **Font Selection**: Optionally select a font.", "raw": "- **Font Selection**: Optionally select a font.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- **Advanced Settings**: Further refine the image creation.", "raw": "- **Advanced Settings**: Further refine the image creation.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Click \"START\" to generate the image.", "raw": "- Click \"START\" to generate the image.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "6. **Comparative Analysis**", "raw": "6. 
**Comparative Analysis**", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "EveryText supports all languages with superior image quality and text legibility, setting it apart from platforms like MidJourneyV6/Flux and AnyText by Alibaba Group.", "raw": "EveryText supports all languages with superior image quality and text legibility, setting it apart from platforms like MidJourneyV6/Flux and AnyText by Alibaba Group.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "7. **Conclusion**", "raw": "7. **Conclusion**", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "EveryText has revolutionized AI-generated imagery by integrating all global languages, broadening the scope for creative and communicative applications. Its future potential is vast and promising.", "raw": "EveryText has revolutionized AI-generated imagery by integrating all global languages, broadening the scope for creative and communicative applications. Its future potential is vast and promising.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "**Related Links**", "raw": "**Related Links**", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Huggingface Service: ", "raw": "- Huggingface Service: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://fantos-EveryText.hf.space", "resource": null, "url": null, "href": "https://fantos-EveryText.hf.space", "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": " ", "raw": " ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "-email: [email protected]", "raw": "-email: [email protected]", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
1. **Overview** "EveryText" is at the forefront of AI image generation, offering a novel "TBF ('Text by Font') Image Model" that enables the representation of all languages globally in AI-generated images without prior training. 2. **Background** Platforms like MidJourneyV6 and FLUX have advanced AI image generation, typically supporting English text. Alibaba Group expanded this to include Chinese, Japanese, and Korean, signaling a shift towards global language support. 3. **Challenges** Existing methods faced several challenges including the need for additional editing, dependency on specific training, and substantial resource requirements. These approaches also struggled with limited vocabulary and were primarily effective only for English. 4. **Innovative Solution** EveryText utilizes "Fonts" as pre-trained models, allowing any text to be visually represented without traditional training. This approach not only enhances diversity and aesthetics by utilizing various fonts but also ensures unlimited expression. 5. **Using the Service** EveryText is free and easy to use: - **Prompt**: Describe the image. - **Text for Image Generation**: Add your text. - **Text Position and Size**: Customize the text's placement and size. - **Font Selection**: Optionally select a font. - **Advanced Settings**: Further refine the image creation. - Click "START" to generate the image. 6. **Comparative Analysis** EveryText supports all languages with superior image quality and text legibility, setting it apart from platforms like MidJourneyV6/Flux and AnyText by Alibaba Group. 7. **Conclusion** EveryText has revolutionized AI-generated imagery by integrating all global languages, broadening the scope for creative and communicative applications. Its future potential is vast and promising. **Related Links** - Huggingface Service: https://fantos-EveryText.hf.space -email: [email protected]
{ "avatarUrl": "/avatars/a45d25cafbb39b1147a694643d17799e.svg", "fullname": "master", "name": "fantos", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 14, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/659f6dc8b2ac16613528c836/RmlzSQ5hgvcRKSXi7GHVT.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/659f6dc8b2ac16613528c836/DFbxwR7l3GjL1nzcO2iV9.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/659f6dc8b2ac16613528c836/A2lMophIKHWRQ-vQerpCx.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/659f6dc8b2ac16613528c836/N8K9Vm2M1_a2SfiPyuGbp.png" } ]
[]
[ { "reaction": "โค๏ธ", "users": [ "seawolf2357", "Kims12", "CSB261", "kijeoung", "maywell", "Obenlia", "jyoung105", "Bruhn", "aiqtech" ], "count": 9 }, { "reaction": "๐Ÿš€", "users": [ "seawolf2357", "Omarito2412", "maywell", "aiqtech" ], "count": 4 }, { "reaction": "๐Ÿ‘€", "users": [ "seawolf2357", "John6666", "aiqtech" ], "count": 3 }, { "reaction": "๐Ÿ”ฅ", "users": [ "seawolf2357", "aiqtech" ], "count": 2 }, { "reaction": "๐Ÿค—", "users": [ "seawolf2357", "aiqtech" ], "count": 2 }, { "reaction": "๐Ÿ‘", "users": [ "SG34", "aiqtech" ], "count": 2 }, { "reaction": "๐Ÿง ", "users": [ "aiqtech" ], "count": 1 } ]
2024-08-29T09:28:52.000Z
2024-08-29T09:29:18.289Z
[]
/posts/fantos/762704167945404
2,182
0
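For readers who want to drive the EveryText demo from code rather than the web UI described in step 5 of the post above, a minimal sketch using gradio_client follows. The Space ID is taken from the link in the post; `Client`, `view_api`, and `predict` are real gradio_client calls, but the endpoint name and the input names/ordering below are assumptions, so inspect the output of `view_api()` before relying on them.

```python
# Hedged sketch: programmatic access to the EveryText Space via gradio_client.
# The argument names and endpoint below are guesses, not the Space's documented
# API -- run view_api() first and adapt the call to what it reports.
from gradio_client import Client

client = Client("fantos/EveryText")  # Space behind https://fantos-EveryText.hf.space
client.view_api()                    # prints the real endpoints and expected inputs

# Hypothetical call mirroring the UI fields from the post (prompt, text,
# position/size, font, advanced settings); uncomment and adjust after checking
# view_api().
# result = client.predict(
#     "a neon shop sign on a rainy street",  # Prompt: describe the image
#     "Hello, world",                        # Text for Image Generation
#     api_name="/predict",
# )
# print(result)  # typically a local file path to the generated image
```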
617519511025924
[ { "type": "text", "value": "๐Ÿ’กAndrew Ng recently gave a strong defense of Open Source AI models and the need to slow down legislative efforts in the US and the EU to restrict innovation in Open Source AI at Stanford GSB.", "raw": "๐Ÿ’กAndrew Ng recently gave a strong defense of Open Source AI models and the need to slow down legislative efforts in the US and the EU to restrict innovation in Open Source AI at Stanford GSB.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐ŸŽฅSee video below", "raw": "๐ŸŽฅSee video below", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://youtu.be/yzUdmwlh1sQ?si=bZc690p8iubolXm_", "resource": null, "url": null, "href": "https://youtu.be/yzUdmwlh1sQ?si=bZc690p8iubolXm_", "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿ’กAndrew Ng recently gave a strong defense of Open Source AI models and the need to slow down legislative efforts in the US and the EU to restrict innovation in Open Source AI at Stanford GSB. ๐ŸŽฅSee video below https://youtu.be/yzUdmwlh1sQ?si=bZc690p8iubolXm_
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/641b754d1911d3be6745cce9/GXN8mEmaq3rfITRrw7GeZ.jpeg", "fullname": "atayloraerospace", "name": "Taylor658", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 74, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "ZennyKenny", "ParagBharadia", "EgesaWO", "clem", "carsenk", "KvnMln", "brainhome", "maximuspowers", "TheDrunkenSnail", "louisbrulenaudet" ], "count": 10 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666", "osanseviero", "clem", "carsenk", "maximuspowers", "odyss3y" ], "count": 6 } ]
2024-08-29T04:32:08.000Z
2024-08-29T12:23:41.152Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/656e3808d4de03a07d116850/JZh4lrjFueJZVqugjoloP.jpeg", "fullname": "Kenneth Hamilton", "name": "ZennyKenny", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 32, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg", "fullname": "leroy Samuel Dyer", "name": "LeroyDyer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 82, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem ๐Ÿค—", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1734, "isFollowing": false } ]
/posts/Taylor658/617519511025924
2,345
4
614968693477682
[ { "type": "text", "value": "Made public a dataset of scraped teletype articles.", "raw": "Made public a dataset of scraped teletype articles.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Here's the overview:", "raw": "Here's the overview:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- 3.3 million articles, predominantly in Russian and English", "raw": "- 3.3 million articles, predominantly in Russian and English", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Includes original HTML, extracted text and metadata", "raw": "- Includes original HTML, extracted text and metadata", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- All articles were run through language identification", "raw": "- All articles were run through language identification", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Includes all public articles up until April 2024", "raw": "- Includes all public articles up until April 2024", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/its5Q/teletype", "resource": { "type": "dataset", "id": "its5Q/teletype", "discussionNum": null }, "url": "https://huggingface.co/datasets/its5Q/teletype", "href": null, "user": null, "lang": null, "code": null, "label": null } ]
Made public a dataset of scraped teletype articles. Here's the overview: - 3.3 million articles, predominantly in Russian and English - Includes original HTML, extracted text and metadata - All articles were run through language identification - Includes all public articles up until April 2024 https://huggingface.co/datasets/its5Q/teletype
{ "avatarUrl": "/avatars/a692e2e2a3b0222e2f8cdfc44ac8d64c.svg", "fullname": "its5Q", "name": "its5Q", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 14, "isFollowing": false }
[]
[]
[ { "reaction": "๐Ÿ‘", "users": [ "ibrahim313", "kristaller486" ], "count": 2 }, { "reaction": "๐Ÿ‘€", "users": [ "John6666" ], "count": 1 } ]
2024-08-28T17:47:29.000Z
2024-08-28T17:47:29.108Z
[]
/posts/its5Q/614968693477682
1,080
0
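Since the post above announces the dataset but does not show a loading recipe, here is a small, hedged sketch using the datasets library; streaming avoids downloading all 3.3 million articles up front. The split name and the language column used in the commented filter are assumptions about the schema, so check the keys of the first record before filtering.

```python
# Sketch: stream its5Q/teletype and inspect its schema before any filtering.
# load_dataset with streaming=True is standard datasets-library usage; the
# "train" split and a "language" column are assumptions, not confirmed fields.
from datasets import load_dataset

ds = load_dataset("its5Q/teletype", split="train", streaming=True)

first = next(iter(ds))
print(first.keys())  # reveals the actual columns (HTML, extracted text, metadata, ...)

# english = ds.filter(lambda row: row["language"] == "en")  # hypothetical column name
# for row in english.take(3):
#     print(row)
```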
524900219749834
[ { "type": "mention", "value": null, "raw": "@victor", "resource": null, "url": null, "href": null, "user": "victor", "lang": null, "code": null, "label": null }, { "type": "text", "value": " (is this the only way to \"DM\" on HF?) ", "raw": " (is this the only way to \"DM\" on HF?) ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Had a funny thought, would it be at all possible to rework what shows up on our personal HF page?", "raw": "Had a funny thought, would it be at all possible to rework what shows up on our personal HF page?", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Picture this: I upload a model to an organization, someone who follows me now has no idea that I've uploaded a model or to where, unless they also watch those repos (which also floods them with other notifications)", "raw": "Picture this: I upload a model to an organization, someone who follows me now has no idea that I've uploaded a model or to where, unless they also watch those repos (which also floods them with other notifications)", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "What if our main Huggingface page was a collection of both models that we've uploaded specifically to our profile, as well as models we've uploaded to organizations? That way it would all be contained in one central followable location, and I wouldn't have concerns about losing followership if I wanted to upload to an organization all of a sudden.", "raw": "What if our main Huggingface page was a collection of both models that we've uploaded specifically to our profile, as well as models we've uploaded to organizations? That way it would all be contained in one central followable location, and I wouldn't have concerns about losing followership if I wanted to upload to an organization all of a sudden.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null } ]
@victor (is this the only way to "DM" on HF?) Had a funny thought, would it be at all possible to rework what shows up on our personal HF page? Picture this: I upload a model to an organization, someone who follows me now has no idea that I've uploaded a model or to where, unless they also watch those repos (which also floods them with other notifications) What if our main Huggingface page was a collection of both models that we've uploaded specifically to our profile, as well as models we've uploaded to organizations? That way it would all be contained in one central followable location, and I wouldn't have concerns about losing followership if I wanted to upload to an organization all of a sudden.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6435718aaaef013d1aec3b8b/XKf-8MA47tjVAM6SCX0MP.jpeg", "fullname": "Bartowski", "name": "bartowski", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2735, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2578 } ]
[ { "reaction": "๐Ÿ‘", "users": [ "mammour", "John6666", "osanseviero", "clem", "Joseph717171" ], "count": 5 }, { "reaction": "๐Ÿ‘€", "users": [ "clem", "Joseph717171" ], "count": 2 }, { "reaction": "๐Ÿค—", "users": [ "Joseph717171" ], "count": 1 } ]
2024-08-28T15:30:47.000Z
2024-08-28T23:25:59.946Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2578, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6435718aaaef013d1aec3b8b/XKf-8MA47tjVAM6SCX0MP.jpeg", "fullname": "Bartowski", "name": "bartowski", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2735, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 384, "isFollowing": false } ]
/posts/bartowski/524900219749834
4,666
3
236937909625634
[ { "type": "text", "value": "๐Ÿ”ฅ Introducing \"Writing in the Margins (WiM)\" - a better inference pattern for long-context LLMs that solves the Lost-in-the-Middle problem ๐Ÿ”ฅ", "raw": "๐Ÿ”ฅ Introducing \"Writing in the Margins (WiM)\" - a better inference pattern for long-context LLMs that solves the Lost-in-the-Middle problem ๐Ÿ”ฅ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Paper page: ", "raw": "Paper page: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2408.14906", "resource": { "type": "paper", "id": "2408.14906", "discussionNum": null }, "url": "https://huggingface.co/papers/2408.14906", "href": null, "user": null, "lang": null, "code": null, "label": "Writing in the Margins: Better Inference Pattern for Long Context\n Retrieval (2408.14906)" }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "TL;DR", "raw": "TL;DR", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Make your model write \"margin notes\" as you chunk prefill the KV cache. Then ask it to reread all notes before it speaks up.", "raw": "Make your model write \"margin notes\" as you chunk prefill the KV cache. Then ask it to reread all notes before it speaks up.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Works with humans, works with AI ๐Ÿค–", "raw": "Works with humans, works with AI ๐Ÿค–", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "WiM leverages the chunked prefill of the key-value cache, which concurrently generates query-based extractive summaries at each step of the prefill that are subsequently reintegrated at the end of the computation. We term these intermediate outputs โ€œmarginsโ€, drawing inspiration from the practice of making margin notes for improved comprehension of long contexts in human reading. We show that this technique, which adds only minimal additional computation, significantly improves LLMs' long-context reasoning capabilities. ", "raw": "WiM leverages the chunked prefill of the key-value cache, which concurrently generates query-based extractive summaries at each step of the prefill that are subsequently reintegrated at the end of the computation. We term these intermediate outputs โ€œmarginsโ€, drawing inspiration from the practice of making margin notes for improved comprehension of long contexts in human reading. We show that this technique, which adds only minimal additional computation, significantly improves LLMs' long-context reasoning capabilities. ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Think: Every chunk has a chance to be attended to/ be at the end of the context at least once. ๐ŸŽ‰", "raw": "Think: Every chunk has a chance to be attended to/ be at the end of the context at least once. ๐ŸŽ‰", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ“Š Results:", "raw": "๐Ÿ“Š Results:", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- An average accuracy boost of 7.5% in multi-hop reasoning tasks like HotpotQA and MultiHop-RAG.", "raw": "- An average accuracy boost of 7.5% in multi-hop reasoning tasks like HotpotQA and MultiHop-RAG.", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "- Even a 30% increase in F1-score for summarisation-like tasks (CWE).", "raw": "- Even a 30% increase in F1-score for summarisation-like tasks (CWE).", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "Plus, WiM fits seamlessly into interactive applications (think: progress bar!). It can provide real-time progress updates during data retrieval and integration, making it user-friendly and transparent - a stark contrast to feeding 1mln tokens to an LLM and waiting 6 min for the first token. ๐Ÿคฏ", "raw": "Plus, WiM fits seamlessly into interactive applications (think: progress bar!). It can provide real-time progress updates during data retrieval and integration, making it user-friendly and transparent - a stark contrast to feeding 1mln tokens to an LLM and waiting 6 min for the first token. ๐Ÿคฏ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿ‘ฉโ€๐Ÿ’ป๐Ÿง‘โ€๐Ÿ’ป Check it out and contribute to our open-source project here: ", "raw": "๐Ÿ‘ฉโ€๐Ÿ’ป๐Ÿง‘โ€๐Ÿ’ป Check it out and contribute to our open-source project here: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://github.com/writer/writing-in-the-margins", "resource": null, "url": null, "href": "https://github.com/writer/writing-in-the-margins", "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "new_line", "value": null, "raw": "\n", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "text", "value": "๐Ÿง  More about chunked prefill: ", "raw": "๐Ÿง  More about chunked prefill: ", "resource": null, "url": null, "href": null, "user": null, "lang": null, "code": null, "label": null }, { "type": "link", "value": null, "raw": "https://docs.vllm.ai/en/latest/models/performance.html#chunked-prefill", "resource": null, "url": null, "href": "https://docs.vllm.ai/en/latest/models/performance.html#chunked-prefill", "user": null, "lang": null, "code": null, "label": null } ]
๐Ÿ”ฅ Introducing "Writing in the Margins (WiM)" - a better inference pattern for long-context LLMs that solves the Lost-in-the-Middle problem ๐Ÿ”ฅ

Paper page: https://huggingface.co/papers/2408.14906

TL;DR
Make your model write "margin notes" as you chunk prefill the KV cache. Then ask it to reread all notes before it speaks up.
Works with humans, works with AI ๐Ÿค–

WiM leverages the chunked prefill of the key-value cache, which concurrently generates query-based extractive summaries at each step of the prefill that are subsequently reintegrated at the end of the computation. We term these intermediate outputs โ€œmarginsโ€, drawing inspiration from the practice of making margin notes for improved comprehension of long contexts in human reading. We show that this technique, which adds only minimal additional computation, significantly improves LLMs' long-context reasoning capabilities. 

Think: Every chunk has a chance to be attended to/ be at the end of the context at least once. ๐ŸŽ‰

๐Ÿ“Š Results:
- An average accuracy boost of 7.5% in multi-hop reasoning tasks like HotpotQA and MultiHop-RAG.
- Even a 30% increase in F1-score for summarisation-like tasks (CWE).

Plus, WiM fits seamlessly into interactive applications (think: progress bar!). It can provide real-time progress updates during data retrieval and integration, making it user-friendly and transparent - a stark contrast to feeding 1mln tokens to an LLM and waiting 6 min for the first token. ๐Ÿคฏ

๐Ÿ‘ฉโ€๐Ÿ’ป๐Ÿง‘โ€๐Ÿ’ป Check it out and contribute to our open-source project here: https://github.com/writer/writing-in-the-margins

๐Ÿง  More about chunked prefill: https://docs.vllm.ai/en/latest/models/performance.html#chunked-prefill
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1625692968400-noauth.jpeg", "fullname": "Melisa Russak", "name": "melisa", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 21, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60e61b3969bd0df25c9375da/-5M-or1OjD3KMMEzdxbBC.png" } ]
[]
[ { "reaction": "๐Ÿ”ฅ", "users": [ "samjulien", "armiller123", "Sri-Vigneshwar-DJ", "mattsobel", "victor", "AdinaY", "jeffboudier", "hkproj", "kiranr", "andrewrreed", "John6666", "leeloolee", "clem", "codelion" ], "count": 14 }, { "reaction": "โค๏ธ", "users": [ "codelion", "hitchhiker3010" ], "count": 2 } ]
2024-08-28T15:03:38.000Z
2024-10-05T03:30:39.536Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/666d1e4e2b9e45273912c14a/FffOTN2hceaGWWoGqnJZW.jpeg", "fullname": "Sam Julien", "name": "samjulien", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 7, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677134945205-62f32eab52ad88c930bb3f3b.png", "fullname": "Asankhaya Sharma", "name": "codelion", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 46, "isFollowing": false } ]
/posts/melisa/236937909625634
2,971
2
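To make the inference pattern in the post above concrete, here is a simplified, prompt-level sketch. It is not the authors' implementation (their repository reuses the chunked-prefill KV cache instead of resending the accumulated text, which is what keeps the extra computation minimal); the `llm` helper, the prompts, and the chunk size are placeholders chosen for illustration.

```python
# Simplified sketch of the Writing-in-the-Margins flow described above:
# 1) walk the long context chunk by chunk, 2) after each chunk, generate a
# query-based extractive "margin note", 3) reintegrate all notes at the end.
# `llm` stands in for any text-completion call; the real method does step 1
# as a chunked prefill of the KV cache rather than resending text each time.
from typing import Callable, List


def chunk_text(text: str, size: int = 4000) -> List[str]:
    """Split the context into fixed-size character chunks (size is arbitrary here)."""
    return [text[i:i + size] for i in range(0, len(text), size)]


def writing_in_the_margins(context: str, query: str, llm: Callable[[str], str]) -> str:
    margins: List[str] = []
    seen = ""
    for part in chunk_text(context):
        seen += part  # stands in for extending the prefilled KV cache
        note = llm(
            f"Context so far:\n{seen}\n\nQuestion: {query}\n"
            "Copy only the sentences from the newest chunk that help answer the question."
        )
        margins.append(note)  # one margin note per chunk; also usable as a progress signal
    # Final step: answer from the reintegrated margin notes.
    return llm(
        f"Question: {query}\n\nMargin notes:\n" + "\n".join(margins)
        + "\n\nUsing only these notes, answer the question."
    )
```

Because a note is produced right after each chunk, every part of the context is effectively "recent" at least once, which is the intuition behind the lost-in-the-middle mitigation; the per-chunk notes also double as the progress updates mentioned in the post.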