ForzaJuve1
committed on
Commit
•
78b201c
1
Parent(s):
647fa6c
Update Integration.py
Browse files- Integration.py +34 -36
Integration.py
CHANGED
@@ -6,7 +6,6 @@ import sys
|
|
6 |
import ast
|
7 |
import json
|
8 |
import yaml
|
9 |
-
import requests
|
10 |
import pyarrow.parquet as pq
|
11 |
|
12 |
class Euro2020Dataset(GeneratorBasedBuilder):
|
@@ -538,26 +537,25 @@ class Euro2020Dataset(GeneratorBasedBuilder):
|
|
538 |
)
|
539 |
|
540 |
def _split_generators(self, dl_manager):
|
541 |
-
file_url = 'https://
|
542 |
downloaded_file = dl_manager.download(file_url)
|
543 |
-
#dl_path = dl_manager.download_and_extract({'train_csv': file_url})
|
544 |
|
545 |
return [
|
546 |
SplitGenerator(
|
547 |
name="train",
|
548 |
-
gen_kwargs={"filepath": downloaded_file}
|
549 |
),
|
550 |
]
|
551 |
|
552 |
# Increase the maximum field size limit
|
553 |
-
|
554 |
|
555 |
def _generate_examples(self, filepath):
|
556 |
|
557 |
-
table = pq.read_table(filepath)
|
558 |
-
df = table.to_pandas()
|
559 |
#df = pd.read_parquet(filepath)
|
560 |
-
for id, row in df.iterrows():
|
561 |
#MatchEvent_dict = process_nested_dict(row.get("MatchEvent", {}))
|
562 |
#TeamLineUps_dict = process_nested_dict(row.get("TeamLineUps", {}))
|
563 |
#TeamStats_dict = process_nested_dict(row.get("TeamStats", {}))
|
@@ -594,34 +592,34 @@ class Euro2020Dataset(GeneratorBasedBuilder):
|
|
594 |
#'row' is a Pandas Series object representing the row data
|
595 |
#Directly yield the id and row.to_dict() if the DataFrame structure matches the desired output
|
596 |
#yield id, row.to_dict()
|
597 |
-
|
598 |
-
|
599 |
-
|
600 |
|
601 |
-
|
602 |
-
|
603 |
-
|
604 |
-
|
605 |
|
606 |
-
|
607 |
-
|
608 |
-
|
609 |
|
610 |
-
|
611 |
-
|
612 |
-
|
613 |
-
|
614 |
|
615 |
-
|
616 |
-
|
617 |
-
|
618 |
-
|
619 |
-
|
620 |
|
621 |
-
|
622 |
-
|
623 |
-
|
624 |
-
|
625 |
|
626 |
yield id, {
|
627 |
"HomeTeamName": row["HomeTeamName"],
|
@@ -649,10 +647,10 @@ class Euro2020Dataset(GeneratorBasedBuilder):
|
|
649 |
"Humidity": row["Humidity"],
|
650 |
"Temperature": row["Temperature"],
|
651 |
"WindSpeed": row["WindSpeed"],
|
652 |
-
"MatchEvent":
|
653 |
-
"TeamLineUps":
|
654 |
-
"TeamStats":
|
655 |
-
"PlayerStats":
|
656 |
-
"PlayerPreMatchInfo":
|
657 |
}
|
658 |
|
|
|
6 |
import ast
|
7 |
import json
|
8 |
import yaml
|
|
|
9 |
import pyarrow.parquet as pq
|
10 |
|
11 |
class Euro2020Dataset(GeneratorBasedBuilder):
|
|
|
537 |
)
|
538 |
|
539 |
def _split_generators(self, dl_manager):
    """Download the dataset file and declare the dataset splits.

    Args:
        dl_manager: the `datasets` download manager used to fetch and
            cache the remote file.

    Returns:
        A single-element list containing the "train" `SplitGenerator`,
        whose `gen_kwargs` carry the local path of the downloaded file
        to `_generate_examples`.
    """
    # Google Drive direct-download endpoint requires the file id to be
    # passed as the 'id=' query parameter; without it the request falls
    # back to a generic HTML page instead of the file.
    file_url = "https://drive.google.com/uc?id=1RKoAQwq68LASd4ret_OKYUkF4B4m6hnu"
    downloaded_file = dl_manager.download(file_url)

    return [
        SplitGenerator(
            name="train",
            gen_kwargs={"filepath": downloaded_file},
        ),
    ]
|
549 |
|
550 |
# Increase the maximum field size limit
|
551 |
+
csv.field_size_limit(sys.maxsize)
|
552 |
|
553 |
def _generate_examples(self, filepath):
|
554 |
|
555 |
+
#table = pq.read_table(filepath)
|
556 |
+
#df = table.to_pandas()
|
557 |
#df = pd.read_parquet(filepath)
|
558 |
+
#for id, row in df.iterrows():
|
559 |
#MatchEvent_dict = process_nested_dict(row.get("MatchEvent", {}))
|
560 |
#TeamLineUps_dict = process_nested_dict(row.get("TeamLineUps", {}))
|
561 |
#TeamStats_dict = process_nested_dict(row.get("TeamStats", {}))
|
|
|
592 |
#'row' is a Pandas Series object representing the row data
|
593 |
#Directly yield the id and row.to_dict() if the DataFrame structure matches the desired output
|
594 |
#yield id, row.to_dict()
|
595 |
+
with open(filepath, encoding="utf-8") as f:
|
596 |
+
reader = pd.read_csv(f)
|
597 |
+
for id, row in enumerate(reader):
|
598 |
|
599 |
+
match_event_str = row["MatchEvent"]
|
600 |
+
match_event_replaced = match_event_str.replace("'", '"')
|
601 |
+
match_event_replaced = match_event_str.replace(": nan: ", ": None: ")
|
602 |
+
match_event = yaml.safe_load(match_event_str)
|
603 |
|
604 |
+
team_linup_str = row["TeamLineUps"]
|
605 |
+
team_linup_replaced = team_linup_str.replace(": nan", ": None")
|
606 |
+
team_linup = ast.literal_eval(team_linup_replaced)
|
607 |
|
608 |
+
team_stats_str = row["TeamStats"]
|
609 |
+
team_stats_replaced = team_stats_str.replace("'", '"')
|
610 |
+
team_stats_replaced = team_stats_replaced.replace(": nan", ": null")
|
611 |
+
team_stats = yaml.safe_load(team_stats_str)
|
612 |
|
613 |
+
player_stats_str = row["PlayerStats"]
|
614 |
+
player_stats_replaced = player_stats_str.replace("'", '"')
|
615 |
+
player_stats_replaced = player_stats_replaced.replace(": nan", ": null")
|
616 |
+
player_stats_replaced = player_stats_str.replace('"', '\"')
|
617 |
+
player_stats = yaml.safe_load(player_stats_replaced)
|
618 |
|
619 |
+
pre_match_str = row["PlayerPreMatchInfo"]
|
620 |
+
pre_match_replaced = pre_match_str.replace("'", '"')
|
621 |
+
pre_match_replaced = pre_match_replaced.replace(": nan", ": null")
|
622 |
+
pre_match_info = yaml.safe_load(pre_match_str)
|
623 |
|
624 |
yield id, {
|
625 |
"HomeTeamName": row["HomeTeamName"],
|
|
|
647 |
"Humidity": row["Humidity"],
|
648 |
"Temperature": row["Temperature"],
|
649 |
"WindSpeed": row["WindSpeed"],
|
650 |
+
"MatchEvent": match_event,
|
651 |
+
"TeamLineUps": team_lineup,
|
652 |
+
"TeamStats": team_stats,
|
653 |
+
"PlayerStats": player_stats,
|
654 |
+
"PlayerPreMatchInfo": pre_match_info
|
655 |
}
|
656 |
|