ForzaJuve1
committed on
Commit
•
c2ebe23
1
Parent(s):
19d79cd
Update Integration.py
Browse files- Integration.py +14 -31
Integration.py
CHANGED
@@ -537,45 +537,28 @@ class Euro2020Dataset(GeneratorBasedBuilder):
|
|
537 |
citation="Optional citation"
|
538 |
)
|
539 |
|
540 |
-
def
|
541 |
-
|
542 |
-
|
543 |
-
with open(filename, 'wb') as f:
|
544 |
-
for chunk in r.iter_content(chunk_size=8192):
|
545 |
-
f.write(chunk)
|
546 |
-
|
547 |
-
file_url = "https://drive.google.com/uc?id=1QiF2WnnI2cxFDpLoetabx9Pe-DgUuV81"
|
548 |
-
download_file(file_url, 'data.parquet')
|
549 |
-
|
550 |
-
#def _split_generators(self, dl_manager):
|
551 |
-
#file_url = 'https://github.com/gchcg/Projects/raw/main/Euro2020.parquet' #"https://drive.google.com/uc?id=1QiF2WnnI2cxFDpLoetabx9Pe-DgUuV81"
|
552 |
-
#1RKoAQwq68LASd4ret_OKYUkF4B4m6hnu
|
553 |
-
#downloaded_file = dl_manager.download(file_url)
|
554 |
#dl_path = dl_manager.download_and_extract({'train_csv': file_url})
|
555 |
|
556 |
-
|
557 |
-
|
558 |
-
|
559 |
-
|
560 |
-
|
561 |
-
|
562 |
|
563 |
# Increase the maximum field size limit
|
564 |
#csv.field_size_limit(sys.maxsize)
|
565 |
-
|
566 |
def _generate_examples(self, filepath):
|
567 |
-
|
568 |
-
df = table.to_pandas()
|
569 |
for id, row in df.iterrows():
|
|
|
|
|
|
|
570 |
yield id, row.to_dict()
|
571 |
-
|
572 |
-
#def _generate_examples(self, filepath):
|
573 |
-
#df = pd.read_parquet(filepath)
|
574 |
-
#for id, row in df.iterrows():
|
575 |
-
# 'id' here is the index of the row in the DataFrame
|
576 |
-
# 'row' is a Pandas Series object representing the row data
|
577 |
-
# Directly yield the id and row.to_dict() if the DataFrame structure matches the desired output
|
578 |
-
#yield id, row.to_dict()
|
579 |
#with open(filepath, encoding="utf-8") as f:
|
580 |
#reader = pd.read_parquet(f)
|
581 |
#for id, row in enumerate(reader):
|
|
|
537 |
citation="Optional citation"
|
538 |
)
|
539 |
|
def _split_generators(self, dl_manager):
    """Fetch the remote parquet file and declare the dataset splits.

    Args:
        dl_manager: the `datasets` download manager used to retrieve
            remote resources; only its ``download`` method is used here.

    Returns:
        A one-element list containing the ``train`` SplitGenerator,
        whose ``gen_kwargs`` carry the local path of the downloaded
        parquet file to ``_generate_examples``.
    """
    # Single data file hosted on Google Drive (direct-download uc link).
    file_url = "https://drive.google.com/uc?id=1QiF2WnnI2cxFDpLoetabx9Pe-DgUuV81"
    downloaded_file = dl_manager.download(file_url)

    train_split = SplitGenerator(
        name="train",
        gen_kwargs={"filepath": downloaded_file},
    )
    return [train_split]
|
552 |
# Increase the maximum field size limit
|
553 |
#csv.field_size_limit(sys.maxsize)
|
554 |
+
|
555 |
def _generate_examples(self, filepath):
|
556 |
+
df = pd.read_parquet(filepath)
|
|
|
557 |
for id, row in df.iterrows():
|
558 |
+
#'id' here is the index of the row in the DataFrame
|
559 |
+
#'row' is a Pandas Series object representing the row data
|
560 |
+
#Directly yield the id and row.to_dict() if the DataFrame structure matches the desired output
|
561 |
yield id, row.to_dict()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
562 |
#with open(filepath, encoding="utf-8") as f:
|
563 |
#reader = pd.read_parquet(f)
|
564 |
#for id, row in enumerate(reader):
|