mtasic85 committed on
Commit
ae1a4de
1 Parent(s): 7e0d09e
Files changed (2)
  1. README.md +61 -0
  2. misc/logo.png +3 -0
README.md CHANGED
@@ -1,3 +1,64 @@
  ---
  license: apache-2.0
+ pipeline_tag: text-generation
+ library_name: transformers
+ language: [
+ 'am', 'ar', 'as', 'az', 'be', 'bg', 'bn', 'bn_rom', 'br',
+ 'bs', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'eo', 'es',
+ 'et', 'eu', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl',
+ 'gn', 'gu', 'ha', 'he', 'hi', 'hi_rom', 'hr', 'ht', 'hu',
+ 'hy', 'id', 'ig', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km',
+ 'kn', 'ko', 'ku', 'ky', 'la', 'lg', 'li', 'ln', 'lo', 'lt',
+ 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'my_zaw',
+ 'ne', 'nl', 'no', 'ns', 'om', 'or', 'pa', 'pl', 'ps', 'pt',
+ 'qu', 'rm', 'ro', 'ru', 'sa', 'si', 'sc', 'sd', 'sk', 'sl',
+ 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'ta_rom',
+ 'te', 'te_rom', 'th', 'tl', 'tn', 'tr', 'ug', 'uk', 'ur',
+ 'ur_rom', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo',
+ 'zh-Hans', 'zh-Hant', 'zu',
+ ]
+ datasets: [
+ 'yahma/alpaca-cleaned',
+ 'gbharti/wealth-alpaca_lora',
+ 'saillab/taco-datasets',
+ 'xu-song/cc100-samples',
+ 'ontocord/fineweb-permissive-multilingual-2m',
+ 'MuskumPillerum/General-Knowledge',
+ 'yirenc/general_knowledge_boolean',
+ 'nampdn-ai/tiny-textbooks',
+ 'nampdn-ai/tiny-codes',
+ 'bigcode/the-stack-smol-xs',
+ 'm-a-p/CodeFeedback-Filtered-Instruction',
+ 'jtatman/python-code-dataset-500k',
+ 'iamtarun/python_code_instructions_18k_alpaca',
+ 'HuggingFaceH4/CodeAlpaca_20K',
+ 'gair-prox/open-web-math-pro',
+ 'rvv-karma/Math-QA',
+ 'ajibawa-2023/Maths-College',
+ 'microsoft/orca-math-word-problems-200k',
+ 'fblgit/simple-math',
+ 'SkunkworksAI/reasoning-0.01',
+ 'badrex/llm-emoji-dataset',
+ ]
+ tags:
+ - litgpt
+ - litdata
  ---
+
+ # tangled-llama-58m-32k-base-v0.1
+
+ ![logo](./misc/logo.png)
+
+ A pretrained language model based on the Llama architecture, with about **58M** parameters. It was trained on **11.4B** (`11,422,750,857`) tokens from more than **0.8M** (`796,399`) dataset rows.
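+
+ A minimal usage sketch, assuming the checkpoint is published under the `tangledgroup/tangled-llama-58m-32k-base-v0.1` repository id (hypothetical) and loads through the `transformers` library declared in the metadata above:
+
+ ```python
+ # Hypothetical sketch: load the checkpoint with Hugging Face Transformers
+ # and generate a short continuation. The repository id is an assumption.
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ repo_id = 'tangledgroup/tangled-llama-58m-32k-base-v0.1'  # assumed repo id
+
+ tokenizer = AutoTokenizer.from_pretrained(repo_id)
+ model = AutoModelForCausalLM.from_pretrained(repo_id)
+
+ inputs = tokenizer('The Llama architecture is', return_tensors='pt')
+ outputs = model.generate(**inputs, max_new_tokens=32)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```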
+
+ This model **isn't** designed for immediate use, but rather for continued pretraining and finetuning on a downstream task. While it can handle a context length of up to **128K** (`131,072`) tokens, it was pretrained with sequences of only **2K** (`2,048`) tokens.
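+
+ As a sketch of what that continued pretraining could look like with the `transformers` Trainer (the card's tags point to litgpt/litdata, so this is an illustrative alternative; the repository id and the dataset's `text` column are assumptions):
+
+ ```python
+ # Hypothetical sketch: continued pretraining with the Hugging Face Trainer,
+ # keeping the 2,048-token sequence length used during pretraining.
+ from datasets import load_dataset
+ from transformers import (AutoModelForCausalLM, AutoTokenizer,
+                           DataCollatorForLanguageModeling, Trainer,
+                           TrainingArguments)
+
+ repo_id = 'tangledgroup/tangled-llama-58m-32k-base-v0.1'  # assumed repo id
+ tokenizer = AutoTokenizer.from_pretrained(repo_id)
+ tokenizer.pad_token = tokenizer.pad_token or tokenizer.eos_token  # Llama tokenizers often lack a pad token
+ model = AutoModelForCausalLM.from_pretrained(repo_id)
+
+ # One of the datasets listed above; a 'text' column is assumed here.
+ dataset = load_dataset('nampdn-ai/tiny-textbooks', split='train[:1%]')
+
+ def tokenize(batch):
+     return tokenizer(batch['text'], truncation=True, max_length=2048)
+
+ tokenized = dataset.map(tokenize, batched=True,
+                         remove_columns=dataset.column_names)
+
+ trainer = Trainer(
+     model=model,
+     args=TrainingArguments(output_dir='out',
+                            per_device_train_batch_size=8,
+                            num_train_epochs=1),
+     train_dataset=tokenized,
+     data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
+ )
+ trainer.train()
+ ```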
+
+ The objective is to streamline the cognitive or reasoning core, eliminating any redundant knowledge from the model.
+
+ Training curves are tracked on Weights & Biases:
+
+ [loss, val_loss](https://api.wandb.ai/links/mtasic85/0i3wqsmb)
+
+ [val_ppl](https://api.wandb.ai/links/mtasic85/vz1jgu3v)
+
+ [epoch](https://api.wandb.ai/links/mtasic85/qltqthjr)
+
+ [learning_rate](https://api.wandb.ai/links/mtasic85/eqpiton4)
misc/logo.png ADDED

Git LFS Details

  • SHA256: e0f10836404a8157509488573f15b011225fea11eeb3d0682b6873d542826a3e
  • Pointer size: 132 Bytes
  • Size of remote file: 1.72 MB