Fraser committed
Commit caac576
1 Parent(s): 4d9e14d

start sagemaker code

Files changed (6)
  1. README.md +2 -0
  2. download_model.py +7 -0
  3. requirements.txt +7 -0
  4. start_training.py +38 -0
  5. train.py +161 -0
  6. train.sh +0 -0
README.md CHANGED
@@ -2,3 +2,5 @@
 # Wiki-VAE

 A Transformer-VAE trained on all the sentences in wikipedia.
+
+Training is done on AWS SageMaker.
download_model.py ADDED
@@ -0,0 +1,7 @@
+from sagemaker.s3 import S3Downloader
+
+S3Downloader.download(
+    s3_uri=huggingface_estimator.model_data,  # S3 URI where the trained model is located
+    local_path='.',                           # local path where the *.tar.gz is saved
+    sagemaker_session=sess                    # SageMaker session used for training the model
+)
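Note that download_model.py assumes huggingface_estimator and sess already exist in the calling session (they come from start_training.py below). A minimal standalone sketch, with the S3 URI left as a placeholder instead of being read from an estimator:

import sagemaker
from sagemaker.s3 import S3Downloader

sess = sagemaker.Session()
S3Downloader.download(
    s3_uri="s3://<bucket>/<training-job-name>/output/model.tar.gz",  # placeholder URI
    local_path=".",
    sagemaker_session=sess,
)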
requirements.txt ADDED
@@ -0,0 +1,7 @@
+wheel
+torch
+transformers
+datasets
+tokenizers
+sagemaker
+scikit-learn
start_training.py ADDED
@@ -0,0 +1,38 @@
+from sagemaker.huggingface import HuggingFace
+
+ROLE = None  # TODO: set to the SageMaker execution role ARN
+
+# hyperparameters, which are passed into the training job
+hyperparameters = {
+    'epochs': 1,
+    'per_device_train_batch_size': 32,
+    'do_train': True,
+    'model_name_or_path': 'distilbert-base-uncased',
+    'output_dir': '/opt/ml/checkpoints'
+}
+
+
+# create the Estimator
+huggingface_estimator = HuggingFace(
+    entry_point='train.py',
+    source_dir='.',
+    instance_type='local',  # 'ml.p3.2xlarge',
+    instance_count=1,
+    checkpoint_s3_uri=f's3://{sess.default_bucket()}/checkpoints',
+    use_spot_instances=True,
+    max_wait=3600,  # must be equal to or greater than max_run, in seconds
+    max_run=1000,
+    role=ROLE,
+    transformers_version='4.4',
+    pytorch_version='1.6',
+    py_version='py36',
+    hyperparameters=hyperparameters,
+)
+
+
+huggingface_estimator.fit(
+    {
+        'train': 's3://sagemaker-us-east-1-558105141721/samples/datasets/imdb/train',
+        'test': 's3://sagemaker-us-east-1-558105141721/samples/datasets/imdb/test'
+    }
+)
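start_training.py references sess and a role without defining them. A minimal sketch of the setup it appears to assume; the role lookup only works inside SageMaker, otherwise an IAM role ARN string has to be supplied:

import sagemaker

sess = sagemaker.Session()             # default SageMaker session, used for the checkpoint bucket
ROLE = sagemaker.get_execution_role()  # inside SageMaker notebooks; otherwise pass an IAM role ARN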
train.py CHANGED
@@ -0,0 +1,161 @@
+import logging
+import sys
+import os
+import inspect
+from typing import Optional, Any
+from dataclasses import dataclass, field, make_dataclass
+from transformers import Trainer, TrainingArguments, AutoTokenizer, HfArgumentParser
+from datasets import load_from_disk
+
+from funnel_vae.src.funnel_vae import FunnelVae
+from funnel_vae.src.config import FunnelVaeConfig
+
+
+@dataclass
+class BaseArgs:
+    # hyperparameters sent by the client are passed as command-line arguments to the script
+    model_name: str
+    epochs: int = 3
+    per_device_train_batch_size: int = 32
+    per_device_eval_batch_size: int = 64
+    warmup_steps: int = 500
+    learning_rate: float = 5e-5
+
+    # paths and resources injected by SageMaker via environment variables
+    output_data_dir: str = os.environ["SM_OUTPUT_DATA_DIR"]
+    model_dir: str = os.environ["SM_MODEL_DIR"]
+    n_gpus: str = os.environ["SM_NUM_GPUS"]
+    training_dir: str = os.environ["SM_CHANNEL_TRAIN"]
+    test_dir: str = os.environ["SM_CHANNEL_TEST"]
+
+
+# ModelArguments: built dynamically from FunnelVaeConfig's constructor signature
+fields = [
+    (
+        'tokenizer_name', Optional[str], field(
+            default='t5-base', metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
+        )
+    ),
+] + [
+    (
+        name, type(info.default) if info.default is not None else Any, field(
+            default=info.default, metadata={"help": f"Has default {info.default}, see FunnelVaeConfig docstring for more info."}
+        )
+    )
+    # get relevant model arguments with defaults
+    for name, info in inspect.signature(FunnelVaeConfig.__init__).parameters.items() if name not in ['self', 'kwargs', 'use_extra_logs', 'cache_dir']
+]
+# ensure fields whose default is None come before the defaulted ones
+start_f = list(filter(lambda f: f[2].default is None, fields))
+end_f = list(filter(lambda f: f[2].default is not None, fields))
+ModelArguments = make_dataclass('ModelArguments', start_f + end_f)
+
+
+@dataclass
+class DataArguments:
+    dataset_name: Optional[str] = field(
+        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
+    )
+    text_column: Optional[str] = field(default=None, metadata={"help": "Use this dataset column as 'text'."})
+    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
+    validation_file: Optional[str] = field(
+        default=None,
+        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
+    )
+    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
+    preprocessing_num_workers: Optional[int] = field(
+        default=None,
+        metadata={"help": "The number of processes to use for the preprocessing."},
+    )
+    mlm_probability: float = field(
+        default=0.0, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
+    )
+    validation_name: str = field(
+        default="validation",
+        metadata={"help": "Name of the set to run evaluation on."},
+    )
+
+    def __post_init__(self):
+        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
+            raise ValueError("Need either a dataset name or a training/validation file.")
+        else:
+            if self.train_file is not None:
+                extension = self.train_file.split(".")[-1]
+                assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file."
+            if self.validation_file is not None:
+                extension = self.validation_file.split(".")[-1]
+                assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file."
+
+
+if __name__ == "__main__":
+    parser = HfArgumentParser((BaseArgs, ModelArguments, DataArguments, TrainingArguments))
+    args, model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+    # Set up logging
+    logger = logging.getLogger(__name__)
+
+    logging.basicConfig(
+        level=logging.getLevelName("INFO"),
+        handlers=[logging.StreamHandler(sys.stdout)],
+        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+    )
+
+    # load datasets
+    train_dataset = load_from_disk(args.training_dir)
+    test_dataset = load_from_disk(args.test_dir)
+
+    logger.info(f" loaded train_dataset length is: {len(train_dataset)}")
+    logger.info(f" loaded test_dataset length is: {len(test_dataset)}")
+
+    # init model
+    config = FunnelVaeConfig.from_pretrained(**model_args.__dict__)
+    tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, use_fast=True)
+
+    vocab_size = len(tokenizer)
+    config.funnel.vocab_size = vocab_size
+    config.t5.vocab_size = vocab_size
+    config.vocab_size = vocab_size
+    model = FunnelVae(config)
+
+    # define training args
+    training_args = TrainingArguments(
+        output_dir=args.model_dir,
+        num_train_epochs=args.epochs,
+        per_device_train_batch_size=args.per_device_train_batch_size,
+        per_device_eval_batch_size=args.per_device_eval_batch_size,
+        warmup_steps=args.warmup_steps,
+        evaluation_strategy="epoch",
+        logging_dir=f"{args.output_data_dir}/logs",
+        learning_rate=args.learning_rate,
+    )
+
+    # create Trainer instance
+    trainer = Trainer(
+        model=model,
+        args=training_args,
+        train_dataset=train_dataset,
+        eval_dataset=test_dataset,
+        tokenizer=tokenizer,
+    )
+
+    # train model
+    trainer.train()
+
+    # evaluate model
+    eval_result = trainer.evaluate(eval_dataset=test_dataset)
+
+    # write eval results to a file that can be accessed later in the S3 output
+    with open(os.path.join(args.output_data_dir, "eval_results.txt"), "w") as writer:
+        print("***** Eval results *****")
+        for key, value in sorted(eval_result.items()):
+            writer.write(f"{key} = {value}\n")
+
+    # save the model to the directory SageMaker uploads to S3
+    trainer.save_model(args.model_dir)
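train.py reads SageMaker's SM_* environment variables at import time, so running it outside a training job requires setting them first. A hypothetical local smoke test, assuming datasets saved with datasets.save_to_disk already exist at the channel paths; the paths and model name are placeholders:

import os
import subprocess

# fake the paths SageMaker would normally inject
os.environ.setdefault("SM_OUTPUT_DATA_DIR", "/tmp/sm/output")
os.environ.setdefault("SM_MODEL_DIR", "/tmp/sm/model")
os.environ.setdefault("SM_NUM_GPUS", "0")
os.environ.setdefault("SM_CHANNEL_TRAIN", "/tmp/sm/data/train")
os.environ.setdefault("SM_CHANNEL_TEST", "/tmp/sm/data/test")

subprocess.run(
    ["python", "train.py",
     "--model_name", "funnel-vae-base",       # placeholder model name
     "--output_dir", "/tmp/sm/checkpoints"],  # required by TrainingArguments
    check=True,
)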
train.sh ADDED
File without changes