Commit 00684f3 (1 parent: 6e6a148)
Author: sasha (HF staff)

Upload folder using huggingface_hub

.hydra/config.yaml CHANGED
@@ -2,7 +2,7 @@ backend:
  name: pytorch
  version: 2.4.0
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
- task: text-generation
+ task: text-classification
  model: cardiffnlp/twitter-roberta-base-sentiment-latest
  processor: cardiffnlp/twitter-roberta-base-sentiment-latest
  library: null
@@ -33,13 +33,13 @@ backend:
  launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
- device_isolation: false
+ device_isolation: true
  device_isolation_action: warn
  start_method: spawn
  benchmark:
  name: energy_star
  _target_: optimum_benchmark.benchmarks.energy_star.benchmark.EnergyStarBenchmark
- dataset_name: EnergyStarAI/text_generation
+ dataset_name: EnergyStarAI/text_classification
  dataset_config: ''
  dataset_split: train
  num_samples: 1000
@@ -62,11 +62,9 @@ benchmark:
  warmup_runs: 10
  energy: true
  forward_kwargs: {}
- generate_kwargs:
-   max_new_tokens: 10
-   min_new_tokens: 10
+ generate_kwargs: {}
  call_kwargs: {}
- experiment_name: text_generation
+ experiment_name: text_classification
  environment:
  cpu: ' AMD EPYC 7R32'
  cpu_count: 48
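
For readers reproducing the change: below is a minimal sketch (not part of the commit) of how the updated benchmark config could be loaded and inspected with OmegaConf, the library Hydra uses for these YAML files. The key paths mirror the diff above; the relative path to `.hydra/config.yaml` is an assumption about where the run artifacts live.

```python
# Minimal sketch, assuming omegaconf is installed and the run artifacts
# (including the .hydra/config.yaml shown above) sit in the working directory.
from omegaconf import OmegaConf

cfg = OmegaConf.load(".hydra/config.yaml")

# The keys touched by this commit, per the diff:
print(cfg.backend.task)               # text-classification (was text-generation)
print(cfg.launcher.device_isolation)  # True (was false)
print(cfg.benchmark.dataset_name)     # EnergyStarAI/text_classification
print(cfg.benchmark.generate_kwargs)  # {} (max_new_tokens/min_new_tokens dropped)
print(cfg.experiment_name)            # text_classification
```
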
.hydra/hydra.yaml CHANGED
@@ -2,7 +2,7 @@ hydra:
  run:
  dir: ./runs/
  sweep:
- dir: sweeps/${experiment_name}/${backend.model}/${now:%Y-%m-%d-%H-%M-%S}
+ dir: sweeps/${experiment_name}/${now:%Y-%m-%d-%H-%M-%S}
  subdir: ${hydra.job.num}
  launcher:
  _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
@@ -129,7 +129,7 @@ hydra:
  override_dirname: backend.model=cardiffnlp/twitter-roberta-base-sentiment-latest,backend.processor=cardiffnlp/twitter-roberta-base-sentiment-latest
  id: ???
  num: ???
- config_name: text_generation
+ config_name: text_classification
  env_set:
  OVERRIDE_BENCHMARKS: '1'
  env_copy: []
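
The new sweep directory pattern drops the `${backend.model}` segment, so multirun outputs are grouped only by experiment name and timestamp. The snippet below is an illustration of how the new pattern resolves; Hydra performs this interpolation itself at launch time, and the code merely mimics it.

```python
from datetime import datetime

# Illustration only: mimics Hydra's resolution of
# sweeps/${experiment_name}/${now:%Y-%m-%d-%H-%M-%S}/${hydra.job.num}
experiment_name = "text_classification"   # from the job config above
timestamp = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
job_num = 0                               # ${hydra.job.num} of the first sweep job
print(f"sweeps/{experiment_name}/{timestamp}/{job_num}")
# e.g. sweeps/text_classification/2024-10-07-19-38-56/0
```
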
cli.log CHANGED
@@ -1,133 +1,31 @@
- [2024-10-07 18:19:03,057][launcher][INFO] - Allocating process launcher
- [2024-10-07 18:19:03,057][process][INFO] - + Setting multiprocessing start method to spawn.
- [2024-10-07 18:19:03,072][device-isolation][INFO] - + Launched device(s) isolation process 178
- [2024-10-07 18:19:03,072][device-isolation][INFO] - + Isolating device(s) [0]
- [2024-10-07 18:19:03,078][process][INFO] - + Launched benchmark in isolated process 179.
- [PROC-0][2024-10-07 18:19:05,709][datasets][INFO] - PyTorch version 2.4.0 available.
- [PROC-0][2024-10-07 18:19:06,961][backend][INFO] - Allocating pytorch backend
- [PROC-0][2024-10-07 18:19:06,962][backend][INFO] - + Setting random seed to 42
- [PROC-0][2024-10-07 18:19:07,843][pytorch][INFO] - + Using AutoModel class AutoModelForSequenceClassification
- [PROC-0][2024-10-07 18:19:07,844][pytorch][INFO] - + Creating backend temporary directory
- [PROC-0][2024-10-07 18:19:07,844][pytorch][INFO] - + Loading model with random weights
- [PROC-0][2024-10-07 18:19:07,844][pytorch][INFO] - + Creating no weights model
- [PROC-0][2024-10-07 18:19:07,844][pytorch][INFO] - + Creating no weights model directory
- [PROC-0][2024-10-07 18:19:07,844][pytorch][INFO] - + Creating no weights model state dict
- [PROC-0][2024-10-07 18:19:07,871][pytorch][INFO] - + Saving no weights model safetensors
- [PROC-0][2024-10-07 18:19:07,872][pytorch][INFO] - + Saving no weights model pretrained config
- [PROC-0][2024-10-07 18:19:07,872][pytorch][INFO] - + Loading no weights AutoModel
- [PROC-0][2024-10-07 18:19:07,872][pytorch][INFO] - + Loading model directly on device: cuda
- [PROC-0][2024-10-07 18:19:08,077][pytorch][INFO] - + Turning on model's eval mode
- [PROC-0][2024-10-07 18:19:08,084][benchmark][INFO] - Allocating energy_star benchmark
- [PROC-0][2024-10-07 18:19:08,084][energy_star][INFO] - + Loading raw dataset
- [PROC-0][2024-10-07 18:19:08,967][energy_star][INFO] - + Initializing Inference report
- [PROC-0][2024-10-07 18:19:08,968][energy][INFO] - + Tracking GPU energy on devices [0]
- [PROC-0][2024-10-07 18:19:13,187][energy_star][INFO] - + Preprocessing dataset
- [PROC-0][2024-10-07 18:19:13,598][energy][INFO] - + Saving codecarbon emission data to preprocess_codecarbon.json
- [PROC-0][2024-10-07 18:19:13,598][energy_star][INFO] - + Preparing backend for Inference
- [PROC-0][2024-10-07 18:19:13,598][energy_star][INFO] - + Initialising dataloader
- [PROC-0][2024-10-07 18:19:13,598][energy_star][INFO] - + Warming up backend for Inference
- [PROC-0][2024-10-07 18:19:14,207][energy_star][INFO] - + Running Inference energy tracking for 10 iterations
- [PROC-0][2024-10-07 18:19:14,208][energy_star][INFO] - + Iteration 1/10
- [2024-10-07 18:19:15,147][experiment][ERROR] - Error during experiment
- [2024-10-07 18:19:18,425][launcher][INFO] - Allocating process launcher
- [2024-10-07 18:19:18,425][process][INFO] - + Setting multiprocessing start method to spawn.
- [2024-10-07 18:19:18,439][process][INFO] - + Launched benchmark in isolated process 423.
- [PROC-0][2024-10-07 18:19:21,034][datasets][INFO] - PyTorch version 2.4.0 available.
- [PROC-0][2024-10-07 18:19:21,939][backend][INFO] - Allocating pytorch backend
- [PROC-0][2024-10-07 18:19:21,940][backend][INFO] - + Setting random seed to 42
- [PROC-0][2024-10-07 18:19:22,505][pytorch][INFO] - + Using AutoModel class AutoModelForCausalLM
- [PROC-0][2024-10-07 18:19:22,505][pytorch][INFO] - + Creating backend temporary directory
- [PROC-0][2024-10-07 18:19:22,505][pytorch][INFO] - + Loading model with random weights
- [PROC-0][2024-10-07 18:19:22,505][pytorch][INFO] - + Creating no weights model
- [PROC-0][2024-10-07 18:19:22,505][pytorch][INFO] - + Creating no weights model directory
- [PROC-0][2024-10-07 18:19:22,505][pytorch][INFO] - + Creating no weights model state dict
- [PROC-0][2024-10-07 18:19:22,527][pytorch][INFO] - + Saving no weights model safetensors
- [PROC-0][2024-10-07 18:19:22,528][pytorch][INFO] - + Saving no weights model pretrained config
- [PROC-0][2024-10-07 18:19:22,529][pytorch][INFO] - + Loading no weights AutoModel
- [PROC-0][2024-10-07 18:19:22,529][pytorch][INFO] - + Loading model directly on device: cuda
- [PROC-0][2024-10-07 18:19:22,697][pytorch][INFO] - + Turning on model's eval mode
- [PROC-0][2024-10-07 18:19:22,703][benchmark][INFO] - Allocating energy_star benchmark
- [PROC-0][2024-10-07 18:19:22,703][energy_star][INFO] - + Loading raw dataset
- [PROC-0][2024-10-07 18:19:23,663][energy_star][INFO] - + Updating Text Generation kwargs with default values
- [PROC-0][2024-10-07 18:19:23,663][energy_star][INFO] - + Initializing Text Generation report
- [PROC-0][2024-10-07 18:19:23,663][energy][INFO] - + Tracking GPU energy on devices [0]
- [PROC-0][2024-10-07 18:19:27,873][energy_star][INFO] - + Preprocessing dataset
- [PROC-0][2024-10-07 18:19:28,795][energy][INFO] - + Saving codecarbon emission data to preprocess_codecarbon.json
- [PROC-0][2024-10-07 18:19:28,796][energy_star][INFO] - + Preparing backend for Inference
- [PROC-0][2024-10-07 18:19:28,796][energy_star][INFO] - + Initialising dataloader
- [PROC-0][2024-10-07 18:19:28,796][energy_star][INFO] - + Warming up backend for Inference
- [PROC-0][2024-10-07 18:19:29,524][energy_star][INFO] - + Additional warmup for Text Generation
- [PROC-0][2024-10-07 18:19:29,607][energy_star][INFO] - + Running Text Generation energy tracking for 10 iterations
- [PROC-0][2024-10-07 18:19:29,607][energy_star][INFO] - + Prefill iteration 1/10
- [PROC-0][2024-10-07 18:19:39,586][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:19:39,587][energy_star][INFO] - + Prefill iteration 2/10
- [PROC-0][2024-10-07 18:19:49,515][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:19:49,515][energy_star][INFO] - + Prefill iteration 3/10
- [PROC-0][2024-10-07 18:19:59,491][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:19:59,491][energy_star][INFO] - + Prefill iteration 4/10
- [PROC-0][2024-10-07 18:20:09,475][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:20:09,476][energy_star][INFO] - + Prefill iteration 5/10
- [PROC-0][2024-10-07 18:20:19,452][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:20:19,452][energy_star][INFO] - + Prefill iteration 6/10
- [PROC-0][2024-10-07 18:20:29,437][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:20:29,437][energy_star][INFO] - + Prefill iteration 7/10
- [PROC-0][2024-10-07 18:20:39,386][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:20:39,387][energy_star][INFO] - + Prefill iteration 8/10
- [PROC-0][2024-10-07 18:20:49,329][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:20:49,329][energy_star][INFO] - + Prefill iteration 9/10
- [PROC-0][2024-10-07 18:20:59,249][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:20:59,250][energy_star][INFO] - + Prefill iteration 10/10
- [PROC-0][2024-10-07 18:21:09,177][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:21:09,177][energy_star][INFO] - + Decoding iteration 1/10
- [2024-10-07 18:21:10,260][experiment][ERROR] - Error during experiment
- [2024-10-07 18:21:13,524][launcher][INFO] - Allocating process launcher
- [2024-10-07 18:21:13,524][process][INFO] - + Setting multiprocessing start method to spawn.
- [2024-10-07 18:21:13,538][process][INFO] - + Launched benchmark in isolated process 608.
- [PROC-0][2024-10-07 18:21:16,135][datasets][INFO] - PyTorch version 2.4.0 available.
- [PROC-0][2024-10-07 18:21:17,043][backend][INFO] - Allocating pytorch backend
- [PROC-0][2024-10-07 18:21:17,043][backend][INFO] - + Setting random seed to 42
- [PROC-0][2024-10-07 18:21:17,540][pytorch][INFO] - + Using AutoModel class AutoModelForCausalLM
- [PROC-0][2024-10-07 18:21:17,541][pytorch][INFO] - + Creating backend temporary directory
- [PROC-0][2024-10-07 18:21:17,541][pytorch][INFO] - + Loading model with random weights
- [PROC-0][2024-10-07 18:21:17,541][pytorch][INFO] - + Creating no weights model
- [PROC-0][2024-10-07 18:21:17,541][pytorch][INFO] - + Creating no weights model directory
- [PROC-0][2024-10-07 18:21:17,541][pytorch][INFO] - + Creating no weights model state dict
- [PROC-0][2024-10-07 18:21:17,563][pytorch][INFO] - + Saving no weights model safetensors
- [PROC-0][2024-10-07 18:21:17,564][pytorch][INFO] - + Saving no weights model pretrained config
- [PROC-0][2024-10-07 18:21:17,565][pytorch][INFO] - + Loading no weights AutoModel
- [PROC-0][2024-10-07 18:21:17,565][pytorch][INFO] - + Loading model directly on device: cuda
- [PROC-0][2024-10-07 18:21:17,725][pytorch][INFO] - + Turning on model's eval mode
- [PROC-0][2024-10-07 18:21:17,731][benchmark][INFO] - Allocating energy_star benchmark
- [PROC-0][2024-10-07 18:21:17,732][energy_star][INFO] - + Loading raw dataset
- [PROC-0][2024-10-07 18:21:18,176][energy_star][INFO] - + Updating Text Generation kwargs with default values
- [PROC-0][2024-10-07 18:21:18,176][energy_star][INFO] - + Initializing Text Generation report
- [PROC-0][2024-10-07 18:21:18,176][energy][INFO] - + Tracking GPU energy on devices [0]
- [PROC-0][2024-10-07 18:21:22,361][energy_star][INFO] - + Preprocessing dataset
- [PROC-0][2024-10-07 18:21:22,394][energy][INFO] - + Saving codecarbon emission data to preprocess_codecarbon.json
- [PROC-0][2024-10-07 18:21:22,394][energy_star][INFO] - + Preparing backend for Inference
- [PROC-0][2024-10-07 18:21:22,394][energy_star][INFO] - + Initialising dataloader
- [PROC-0][2024-10-07 18:21:22,395][energy_star][INFO] - + Warming up backend for Inference
- [PROC-0][2024-10-07 18:21:23,134][energy_star][INFO] - + Additional warmup for Text Generation
- [PROC-0][2024-10-07 18:21:23,217][energy_star][INFO] - + Running Text Generation energy tracking for 10 iterations
- [PROC-0][2024-10-07 18:21:23,217][energy_star][INFO] - + Prefill iteration 1/10
- [PROC-0][2024-10-07 18:21:33,219][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:21:33,219][energy_star][INFO] - + Prefill iteration 2/10
- [PROC-0][2024-10-07 18:21:43,173][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:21:43,174][energy_star][INFO] - + Prefill iteration 3/10
- [PROC-0][2024-10-07 18:21:53,105][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:21:53,106][energy_star][INFO] - + Prefill iteration 4/10
- [PROC-0][2024-10-07 18:22:03,003][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:22:03,003][energy_star][INFO] - + Prefill iteration 5/10
- [PROC-0][2024-10-07 18:22:12,906][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:22:12,906][energy_star][INFO] - + Prefill iteration 6/10
- [PROC-0][2024-10-07 18:22:22,796][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:22:22,796][energy_star][INFO] - + Prefill iteration 7/10
- [PROC-0][2024-10-07 18:22:32,699][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:22:32,700][energy_star][INFO] - + Prefill iteration 8/10
- [PROC-0][2024-10-07 18:22:42,667][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:22:42,667][energy_star][INFO] - + Prefill iteration 9/10
- [PROC-0][2024-10-07 18:22:52,604][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:22:52,604][energy_star][INFO] - + Prefill iteration 10/10
- [PROC-0][2024-10-07 18:23:02,551][energy][INFO] - + Saving codecarbon emission data to prefill_codecarbon.json
- [PROC-0][2024-10-07 18:23:02,551][energy_star][INFO] - + Decoding iteration 1/10
- [2024-10-07 18:23:03,646][experiment][ERROR] - Error during experiment
+ [2024-10-07 19:38:56,544][launcher][INFO] - Allocating process launcher
+ [2024-10-07 19:38:56,544][process][INFO] - + Setting multiprocessing start method to spawn.
+ [2024-10-07 19:38:56,561][device-isolation][INFO] - + Launched device(s) isolation process 179
+ [2024-10-07 19:38:56,561][device-isolation][INFO] - + Isolating device(s) [0]
+ [2024-10-07 19:38:56,567][process][INFO] - + Launched benchmark in isolated process 180.
+ [PROC-0][2024-10-07 19:38:59,407][datasets][INFO] - PyTorch version 2.4.0 available.
+ [PROC-0][2024-10-07 19:39:00,877][backend][INFO] - Allocating pytorch backend
+ [PROC-0][2024-10-07 19:39:00,877][backend][INFO] - + Setting random seed to 42
+ [PROC-0][2024-10-07 19:39:01,633][pytorch][INFO] - + Using AutoModel class AutoModelForSequenceClassification
+ [PROC-0][2024-10-07 19:39:01,634][pytorch][INFO] - + Creating backend temporary directory
+ [PROC-0][2024-10-07 19:39:01,634][pytorch][INFO] - + Loading model with random weights
+ [PROC-0][2024-10-07 19:39:01,634][pytorch][INFO] - + Creating no weights model
+ [PROC-0][2024-10-07 19:39:01,634][pytorch][INFO] - + Creating no weights model directory
+ [PROC-0][2024-10-07 19:39:01,634][pytorch][INFO] - + Creating no weights model state dict
+ [PROC-0][2024-10-07 19:39:01,643][pytorch][INFO] - + Saving no weights model safetensors
+ [PROC-0][2024-10-07 19:39:01,643][pytorch][INFO] - + Saving no weights model pretrained config
+ [PROC-0][2024-10-07 19:39:01,644][pytorch][INFO] - + Loading no weights AutoModel
+ [PROC-0][2024-10-07 19:39:01,644][pytorch][INFO] - + Loading model directly on device: cuda
+ [PROC-0][2024-10-07 19:39:01,871][pytorch][INFO] - + Turning on model's eval mode
+ [PROC-0][2024-10-07 19:39:01,878][benchmark][INFO] - Allocating energy_star benchmark
+ [PROC-0][2024-10-07 19:39:01,878][energy_star][INFO] - + Loading raw dataset
+ [PROC-0][2024-10-07 19:39:02,892][energy_star][INFO] - + Initializing Inference report
+ [PROC-0][2024-10-07 19:39:02,893][energy][INFO] - + Tracking GPU energy on devices [0]
+ [PROC-0][2024-10-07 19:39:07,094][energy_star][INFO] - + Preprocessing dataset
+ [PROC-0][2024-10-07 19:39:07,507][energy][INFO] - + Saving codecarbon emission data to preprocess_codecarbon.json
+ [PROC-0][2024-10-07 19:39:07,507][energy_star][INFO] - + Preparing backend for Inference
+ [PROC-0][2024-10-07 19:39:07,508][energy_star][INFO] - + Initialising dataloader
+ [PROC-0][2024-10-07 19:39:07,508][energy_star][INFO] - + Warming up backend for Inference
+ [PROC-0][2024-10-07 19:39:08,176][energy_star][INFO] - + Running Inference energy tracking for 10 iterations
+ [PROC-0][2024-10-07 19:39:08,177][energy_star][INFO] - + Iteration 1/10
+ [2024-10-07 19:39:09,147][experiment][ERROR] - Error during experiment

error.log CHANGED
The diff for this file is too large to render. See raw diff
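
Both the previous log (which retried the run as text generation and failed during the first decoding iteration) and the new text-classification log end with "Error during experiment" right after the first measured iteration; the full traceback is in error.log, which is too large to render here. A hypothetical helper like the one below (not part of this repository; names and regex are mine) can pull the timestamps out of cli.log to see how far each attempt got and how much wall-clock time elapsed between steps.

```python
import re
from datetime import datetime

# Matches lines in the format shown above, e.g.
# [PROC-0][2024-10-07 19:39:08,177][energy_star][INFO] - + Iteration 1/10
LINE_RE = re.compile(
    r"\[(?:PROC-\d+\]\[)?(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}),\d+\]\[([^\]]+)\]\[(\w+)\] - (.*)"
)

def parse_cli_log(path="cli.log"):
    """Return (timestamp, logger, level, message) tuples from a cli.log."""
    events = []
    with open(path) as f:
        for line in f:
            m = LINE_RE.match(line.strip())
            if m:
                ts = datetime.strptime(m.group(1), "%Y-%m-%d %H:%M:%S")
                events.append((ts, m.group(2), m.group(3), m.group(4)))
    return events

events = parse_cli_log()
if events:
    start = events[0][0]
    for ts, logger, level, msg in events:
        print(f"+{(ts - start).total_seconds():7.1f}s [{logger}/{level}] {msg}")
```
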
 
experiment_config.json CHANGED
@@ -1,10 +1,10 @@
  {
- "experiment_name": "text_generation",
+ "experiment_name": "text_classification",
  "backend": {
  "name": "pytorch",
  "version": "2.4.0",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
- "task": "text-generation",
+ "task": "text-classification",
  "model": "cardiffnlp/twitter-roberta-base-sentiment-latest",
  "processor": "cardiffnlp/twitter-roberta-base-sentiment-latest",
  "library": "transformers",
@@ -41,14 +41,14 @@
  "launcher": {
  "name": "process",
  "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
- "device_isolation": false,
+ "device_isolation": true,
  "device_isolation_action": "warn",
  "start_method": "spawn"
  },
  "benchmark": {
  "name": "energy_star",
  "_target_": "optimum_benchmark.benchmarks.energy_star.benchmark.EnergyStarBenchmark",
- "dataset_name": "EnergyStarAI/text_generation",
+ "dataset_name": "EnergyStarAI/text_classification",
  "dataset_config": "",
  "dataset_split": "train",
  "num_samples": 1000,
@@ -72,10 +72,7 @@
  "warmup_runs": 10,
  "energy": true,
  "forward_kwargs": {},
- "generate_kwargs": {
-   "max_new_tokens": 10,
-   "min_new_tokens": 10
- },
+ "generate_kwargs": {},
  "call_kwargs": {}
  },
  "environment": {

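A small sketch (standard library only, assuming experiment_config.json is in the working directory) that cross-checks the serialized experiment config against the intent of this commit; every key it touches appears in the diff above.

```python
import json

# Sanity-check the serialized experiment config against the diff above.
with open("experiment_config.json") as f:
    exp = json.load(f)

assert exp["experiment_name"] == "text_classification"
assert exp["backend"]["task"] == "text-classification"
assert exp["benchmark"]["dataset_name"] == "EnergyStarAI/text_classification"
assert exp["benchmark"]["generate_kwargs"] == {}   # no generation kwargs for a classification task
assert exp["launcher"]["device_isolation"] is True
print("experiment_config.json is consistent with the text-classification run")
```
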
@@ -1,17 +1,17 @@
1
  {
2
- "timestamp": "2024-10-07T18:21:22",
3
  "project_name": "codecarbon",
4
- "run_id": "7c0f3a34-f2fd-4b75-8053-9a1cffb59eae",
5
- "duration": -1727837488.2322512,
6
- "emissions": 1.4065464079452445e-07,
7
- "emissions_rate": 4.680032608026597e-06,
8
  "cpu_power": 42.5,
9
- "gpu_power": 0.0,
10
- "ram_power": 0.2622342109680176,
11
- "cpu_energy": 3.789163760100362e-07,
12
- "gpu_energy": 0.0,
13
- "ram_energy": 2.122750170069354e-09,
14
- "energy_consumed": 3.8103912618010555e-07,
15
  "country_name": "United States",
16
  "country_iso_code": "USA",
17
  "region": "virginia",
 
1
  {
2
+ "timestamp": "2024-10-07T19:39:07",
3
  "project_name": "codecarbon",
4
+ "run_id": "774e7719-f934-4399-b7a4-a1b8217e79d4",
5
+ "duration": -1727736284.8784947,
6
+ "emissions": 4.616095917489861e-06,
7
+ "emissions_rate": 1.1266085626931122e-05,
8
  "cpu_power": 42.5,
9
+ "gpu_power": 66.90181316396337,
10
+ "ram_power": 0.2709217071533203,
11
+ "cpu_energy": 4.862197849191338e-06,
12
+ "gpu_energy": 7.612228312581237e-06,
13
+ "ram_energy": 3.076488509377195e-08,
14
+ "energy_consumed": 1.2505191046866349e-05,
15
  "country_name": "United States",
16
  "country_iso_code": "USA",
17
  "region": "virginia",