meg-huggingface committed on
Commit
279e63d
1 Parent(s): 9d4801c

dataset prettifier

Files changed (1)
  1. make_pretty_dataset.py +73 -0
make_pretty_dataset.py ADDED
import glob
import json
import os
from datetime import datetime

from datasets import Dataset
from huggingface_hub import HfApi

TOKEN = os.environ.get("HF_WRITE_TOKEN")
API = HfApi(token=TOKEN)
REPO_ID = "meg/calculate_carbon_runs"
UPLOAD_REPO_ID = "meg/HUGS_energy"

# Download the raw benchmark-run dataset to a local directory.
output_directory = API.snapshot_download(repo_id=REPO_ID, repo_type="dataset")
print(output_directory)

dataset_results = []
for task in ["text_generation"]:
    # Runs are laid out as runs/<task>/<hardware>/<org>/<model>/<timestamp>/.
    hardware_dirs = glob.glob(f"{output_directory}/runs/{task}/*")
    print(hardware_dirs)
    for hardware_dir in hardware_dirs:
        hardware = os.path.basename(hardware_dir)
        org_dirs = glob.glob(f"{hardware_dir}/*")
        print(org_dirs)
        for org_dir in org_dirs:
            org = os.path.basename(org_dir)
            model_dirs = glob.glob(f"{org_dir}/*")
            print(model_dirs)
            for model_dir in model_dirs:
                model = os.path.basename(model_dir)
                model_runs = glob.glob(f"{model_dir}/*")
                dates = [os.path.basename(run_dir) for run_dir in model_runs]
                try:
                    # Sort the run timestamps as dates, not as strings.
                    sorted_dates = sorted(
                        datetime.strptime(date, "%Y-%m-%d-%H-%M-%S")
                        for date in dates
                    )
                    # Convert the most recent one back to the directory-name
                    # string format.
                    last_date = sorted_dates[-1].strftime("%Y-%m-%d-%H-%M-%S")
                    most_recent_run = f"{model_dir}/{last_date}"
                    print(most_recent_run)
                    try:
                        with open(f"{most_recent_run}/benchmark_report.json") as f:
                            benchmark_report = json.load(f)
                        print(benchmark_report)
                        # Pull out the energy/efficiency numbers for each phase.
                        prefill = benchmark_report["prefill"]
                        decode = benchmark_report["decode"]
                        preprocess = benchmark_report["preprocess"]
                        dataset_results.append(
                            {
                                "task": task,
                                "org": org,
                                "model": model,
                                "hardware": hardware,
                                "date": last_date,
                                "prefill": {
                                    "energy": prefill["energy"],
                                    "efficiency": prefill["efficiency"],
                                },
                                "decode": {
                                    "energy": decode["energy"],
                                    "efficiency": decode["efficiency"],
                                },
                                "preprocess": {
                                    "energy": preprocess["energy"],
                                    "efficiency": preprocess["efficiency"],
                                },
                            }
                        )
                    except FileNotFoundError:
                        # The run failed before writing a report; surface its
                        # error log instead.
                        with open(f"{most_recent_run}/error.log") as f:
                            print(f.read())
                except ValueError:
                    # Not a directory with a timestamp.
                    continue

print("*****")
print(dataset_results)
hub_dataset_results = Dataset.from_list(dataset_results)
print(hub_dataset_results)
hub_dataset_results.push_to_hub(UPLOAD_REPO_ID, token=TOKEN)
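For reference, a minimal sketch of reading the pushed dataset back from the Hub with datasets.load_dataset. It assumes read access to meg/HUGS_energy and the nested prefill/decode/preprocess record schema built above; push_to_hub writes a single "train" split by default.

from datasets import load_dataset

# Sketch of consuming the dataset pushed above; assumes read access to
# meg/HUGS_energy and the record schema built by this script.
energy = load_dataset("meg/HUGS_energy", split="train")
for row in energy:
    print(
        row["task"], row["org"], row["model"], row["hardware"],
        "prefill energy:", row["prefill"]["energy"],
        "decode efficiency:", row["decode"]["efficiency"],
    )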