repo_name (stringclasses, 5 values) | repo_url (stringclasses, 5 values) | repo_description (stringclasses, 3 values) | repo_stars (int64, 6 to 15.8k) | repo_forks (int64, 192 to 3.6k) | repo_last_updated (stringclasses, 5 values) | repo_created_at (stringclasses, 5 values) | repo_size (int64, 513 to 2.13k) | repo_license (stringclasses, 4 values) | language (stringclasses, 2 values) | text (stringlengths, 0 to 27.5k) | avg_line_length (float64, 0 to 74.3) | max_line_length (int64, 0 to 652) | alphanum_fraction (float64, 0 to 0.8)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import os
import logging
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from google.cloud import storage
from airflow.providers.google.cloud.operators.bigquery import BigQueryCreateExternalTableOperator
import pyarrow.csv as pv
import pyarrow.parquet as pq
PROJECT_ID = os.environ.get("GCP_PROJECT_ID")
BUCKET = os.environ.get("GCP_GCS_BUCKET")
dataset_file = "yellow_tripdata_2021-01.csv"
dataset_url = f"https://s3.amazonaws.com/nyc-tlc/trip+data/{dataset_file}"
path_to_local_home = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")
parquet_file = dataset_file.replace('.csv', '.parquet')
BIGQUERY_DATASET = os.environ.get("BIGQUERY_DATASET", 'trips_data_all')
def format_to_parquet(src_file):
if not src_file.endswith('.csv'):
logging.error("Can only accept source files in CSV format, for the moment")
return
table = pv.read_csv(src_file)
pq.write_table(table, src_file.replace('.csv', '.parquet'))
# NOTE: takes 20 mins, at an upload speed of 800kbps. Faster if your internet has a better upload speed
def upload_to_gcs(bucket, object_name, local_file):
"""
Ref: https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
:param bucket: GCS bucket name
:param object_name: target path & file-name
:param local_file: source path & file-name
:return:
"""
# WORKAROUND to prevent timeout for files > 6 MB on 800 kbps upload speed.
# (Ref: https://github.com/googleapis/python-storage/issues/74)
storage.blob._MAX_MULTIPART_SIZE = 5 * 1024 * 1024 # 5 MB
storage.blob._DEFAULT_CHUNKSIZE = 5 * 1024 * 1024 # 5 MB
# End of Workaround
client = storage.Client()
bucket = client.bucket(bucket)
blob = bucket.blob(object_name)
blob.upload_from_filename(local_file)
default_args = {
"owner": "airflow",
"start_date": days_ago(1),
"depends_on_past": False,
"retries": 1,
}
# NOTE: DAG declaration - using a Context Manager (an implicit way)
with DAG(
dag_id="data_ingestion_gcs_dag",
schedule_interval="@daily",
default_args=default_args,
catchup=False,
max_active_runs=1,
tags=['dtc-de'],
) as dag:
download_dataset_task = BashOperator(
task_id="download_dataset_task",
bash_command=f"curl -sSL {dataset_url} > {path_to_local_home}/{dataset_file}"
)
format_to_parquet_task = PythonOperator(
task_id="format_to_parquet_task",
python_callable=format_to_parquet,
op_kwargs={
"src_file": f"{path_to_local_home}/{dataset_file}",
},
)
# TODO: Homework - research and try XCom to communicate output values between two tasks/operators (a sketch follows after this file)
local_to_gcs_task = PythonOperator(
task_id="local_to_gcs_task",
python_callable=upload_to_gcs,
op_kwargs={
"bucket": BUCKET,
"object_name": f"raw/{parquet_file}",
"local_file": f"{path_to_local_home}/{parquet_file}",
},
)
bigquery_external_table_task = BigQueryCreateExternalTableOperator(
task_id="bigquery_external_table_task",
table_resource={
"tableReference": {
"projectId": PROJECT_ID,
"datasetId": BIGQUERY_DATASET,
"tableId": "external_table",
},
"externalDataConfiguration": {
"sourceFormat": "PARQUET",
"sourceUris": [f"gs://{BUCKET}/raw/{parquet_file}"],
},
},
)
download_dataset_task >> format_to_parquet_task >> local_to_gcs_task >> bigquery_external_table_task
| 32.423423 | 104 | 0.65031 |
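The TODO in the DAG above asks about using XCom to pass values between the two PythonOperator tasks. A minimal sketch, assuming Airflow 2.x and the same surrounding names as the file above (the DAG context, BUCKET, path_to_local_home, dataset_file, parquet_file, upload_to_gcs); the only changes are the explicit return value and the templated pull:

import pyarrow.csv as pv
import pyarrow.parquet as pq
from airflow.operators.python import PythonOperator

def format_to_parquet(src_file):
    # Returning a value from a python_callable pushes it to XCom under the key 'return_value'.
    parquet_path = src_file.replace('.csv', '.parquet')
    table = pv.read_csv(src_file)
    pq.write_table(table, parquet_path)
    return parquet_path

# Inside the same `with DAG(...)` block as above:
format_to_parquet_task = PythonOperator(
    task_id="format_to_parquet_task",
    python_callable=format_to_parquet,
    op_kwargs={"src_file": f"{path_to_local_home}/{dataset_file}"},
)
local_to_gcs_task = PythonOperator(
    task_id="local_to_gcs_task",
    python_callable=upload_to_gcs,
    op_kwargs={
        "bucket": BUCKET,
        "object_name": f"raw/{parquet_file}",
        # op_kwargs is templated, so this pulls the upstream task's return value at runtime.
        "local_file": "{{ ti.xcom_pull(task_ids='format_to_parquet_task') }}",
    },
)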
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import os
from datetime import datetime
from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from ingest_script import ingest_callable
AIRFLOW_HOME = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")
PG_HOST = os.getenv('PG_HOST')
PG_USER = os.getenv('PG_USER')
PG_PASSWORD = os.getenv('PG_PASSWORD')
PG_PORT = os.getenv('PG_PORT')
PG_DATABASE = os.getenv('PG_DATABASE')
local_workflow = DAG(
"LocalIngestionDag",
schedule_interval="0 6 2 * *",
start_date=datetime(2021, 1, 1)
)
URL_PREFIX = 'https://s3.amazonaws.com/nyc-tlc/trip+data'
URL_TEMPLATE = URL_PREFIX + '/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
OUTPUT_FILE_TEMPLATE = AIRFLOW_HOME + '/output_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
TABLE_NAME_TEMPLATE = 'yellow_taxi_{{ execution_date.strftime(\'%Y_%m\') }}'
with local_workflow:
wget_task = BashOperator(
task_id='wget',
bash_command=f'curl -sSL {URL_TEMPLATE} > {OUTPUT_FILE_TEMPLATE}'
)
ingest_task = PythonOperator(
task_id="ingest",
python_callable=ingest_callable,
op_kwargs=dict(
user=PG_USER,
password=PG_PASSWORD,
host=PG_HOST,
port=PG_PORT,
db=PG_DATABASE,
table_name=TABLE_NAME_TEMPLATE,
csv_file=OUTPUT_FILE_TEMPLATE
),
)
wget_task >> ingest_task | 25.327273 | 92 | 0.639945 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import os
from time import time
import pandas as pd
from sqlalchemy import create_engine
def ingest_callable(user, password, host, port, db, table_name, csv_file, execution_date):
print(table_name, csv_file, execution_date)
engine = create_engine(f'postgresql://{user}:{password}@{host}:{port}/{db}')
engine.connect()
print('connection established successfully, inserting data...')
t_start = time()
df_iter = pd.read_csv(csv_file, iterator=True, chunksize=100000)
df = next(df_iter)
df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)
df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime)
df.head(n=0).to_sql(name=table_name, con=engine, if_exists='replace')
df.to_sql(name=table_name, con=engine, if_exists='append')
t_end = time()
print('inserted the first chunk, took %.3f seconds' % (t_end - t_start))
while True:
t_start = time()
try:
df = next(df_iter)
except StopIteration:
print("completed")
break
df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)
df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime)
df.to_sql(name=table_name, con=engine, if_exists='append')
t_end = time()
print('inserted another chunk, took %.3f seconds' % (t_end - t_start))
| 27.306122 | 90 | 0.6443 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import os
from datetime import datetime
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from google.cloud import storage
PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "pivotal-surfer-336713")
BUCKET = os.environ.get("GCP_GCS_BUCKET", "dtc_data_lake_pivotal-surfer-336713")
dataset_file = "yellow_tripdata_2021-01.csv"
dataset_url = f"https://s3.amazonaws.com/nyc-tlc/trip+data/{dataset_file}"
path_to_local_home = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")
path_to_creds = f"{path_to_local_home}/google_credentials.json"
default_args = {
"owner": "airflow",
"start_date": days_ago(1),
"depends_on_past": False,
"retries": 1,
}
# # Takes 15-20 mins to run. Good case for using Spark (distributed processing, in place of chunks)
# def upload_to_gcs(bucket, object_name, local_file):
# """
# Ref: https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
# :param bucket: GCS bucket name
# :param object_name: target path & file-name
# :param local_file: source path & file-name
# :return:
# """
# # WORKAROUND to prevent timeout for files > 6 MB on 800 kbps upload link.
# # (Ref: https://github.com/googleapis/python-storage/issues/74)
# storage.blob._MAX_MULTIPART_SIZE = 5 * 1024 * 1024 # 5 MB
# storage.blob._DEFAULT_CHUNKSIZE = 5 * 1024 * 1024 # 5 MB
#
# client = storage.Client()
# bucket = client.bucket(bucket)
#
# blob = bucket.blob(object_name)
# # blob.chunk_size = 5 * 1024 * 1024
# blob.upload_from_filename(local_file)
with DAG(
dag_id="data_ingestion_gcs_dag",
schedule_interval="@daily",
default_args=default_args,
catchup=True,
max_active_runs=1,
) as dag:
# Takes ~2 mins, depending upon your internet's download speed
download_dataset_task = BashOperator(
task_id="download_dataset_task",
bash_command=f"curl -sS {dataset_url} > {path_to_local_home}/{dataset_file}" # "&& unzip {zip_file} && rm {zip_file}"
)
# # APPROACH 1: (takes 20 mins, at an upload speed of 800Kbps. Faster if your internet has a better upload speed)
# upload_to_gcs_task = PythonOperator(
# task_id="upload_to_gcs_task",
# python_callable=upload_to_gcs,
# op_kwargs={
# "bucket": BUCKET,
# "object_name": f"raw/{dataset_file}",
# "local_file": f"{path_to_local_home}/{dataset_file}",
#
# },
# )
# OR APPROACH 2: (takes 20 mins, at an upload speed of 800Kbps. Faster if your internet has a better upload speed)
# Ref: https://cloud.google.com/blog/products/gcp/optimizing-your-cloud-storage-performance-google-cloud-performance-atlas
upload_to_gcs_task = BashOperator(
task_id="upload_to_gcs_task",
bash_command=f"gcloud auth activate-service-account --key-file={path_to_creds} && \
gsutil -m cp {path_to_local_home}/{dataset_file} gs://{BUCKET}",
)
download_dataset_task >> upload_to_gcs_task | 36.421687 | 128 | 0.66087 |
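The DAG above shells out to gcloud/gsutil for the upload. The Google provider package also ships a dedicated transfer operator for this step; a sketch, assuming apache-airflow-providers-google is installed and the Airflow GCP connection is configured (only the upload task changes):

from airflow.providers.google.cloud.transfers.local_to_gcs import LocalFilesystemToGCSOperator

# Uploads the downloaded CSV to the bucket via the Airflow GCP connection instead of the gcloud CLI.
upload_to_gcs_task = LocalFilesystemToGCSOperator(
    task_id="upload_to_gcs_task",
    src=f"{path_to_local_home}/{dataset_file}",
    dst=f"raw/{dataset_file}",
    bucket=BUCKET,
)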
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import os
import logging
from datetime import datetime
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from google.cloud import storage
import pyarrow.csv as pv
import pyarrow.parquet as pq
PROJECT_ID = os.environ.get("GCP_PROJECT_ID")
BUCKET = os.environ.get("GCP_GCS_BUCKET")
AIRFLOW_HOME = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")
def format_to_parquet(src_file, dest_file):
if not src_file.endswith('.csv'):
logging.error("Can only accept source files in CSV format, for the moment")
return
table = pv.read_csv(src_file)
pq.write_table(table, dest_file)
def upload_to_gcs(bucket, object_name, local_file):
client = storage.Client()
bucket = client.bucket(bucket)
blob = bucket.blob(object_name)
blob.upload_from_filename(local_file)
default_args = {
"owner": "airflow",
#"start_date": days_ago(1),
"depends_on_past": False,
"retries": 1,
}
def download_parquetize_upload_dag(
dag,
url_template,
local_csv_path_template,
local_parquet_path_template,
gcs_path_template
):
with dag:
download_dataset_task = BashOperator(
task_id="download_dataset_task",
bash_command=f"curl -sSLf {url_template} > {local_csv_path_template}"
)
format_to_parquet_task = PythonOperator(
task_id="format_to_parquet_task",
python_callable=format_to_parquet,
op_kwargs={
"src_file": local_csv_path_template,
"dest_file": local_parquet_path_template
},
)
local_to_gcs_task = PythonOperator(
task_id="local_to_gcs_task",
python_callable=upload_to_gcs,
op_kwargs={
"bucket": BUCKET,
"object_name": gcs_path_template,
"local_file": local_parquet_path_template,
},
)
rm_task = BashOperator(
task_id="rm_task",
bash_command=f"rm {local_csv_path_template} {local_parquet_path_template}"
)
download_dataset_task >> format_to_parquet_task >> local_to_gcs_task >> rm_task
URL_PREFIX = 'https://s3.amazonaws.com/nyc-tlc/trip+data'
YELLOW_TAXI_URL_TEMPLATE = URL_PREFIX + '/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
YELLOW_TAXI_CSV_FILE_TEMPLATE = AIRFLOW_HOME + '/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
YELLOW_TAXI_PARQUET_FILE_TEMPLATE = AIRFLOW_HOME + '/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet'
YELLOW_TAXI_GCS_PATH_TEMPLATE = "raw/yellow_tripdata/{{ execution_date.strftime(\'%Y\') }}/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet"
yellow_taxi_data_dag = DAG(
dag_id="yellow_taxi_data_v2",
schedule_interval="0 6 2 * *",
start_date=datetime(2019, 1, 1),
default_args=default_args,
catchup=True,
max_active_runs=3,
tags=['dtc-de'],
)
download_parquetize_upload_dag(
dag=yellow_taxi_data_dag,
url_template=YELLOW_TAXI_URL_TEMPLATE,
local_csv_path_template=YELLOW_TAXI_CSV_FILE_TEMPLATE,
local_parquet_path_template=YELLOW_TAXI_PARQUET_FILE_TEMPLATE,
gcs_path_template=YELLOW_TAXI_GCS_PATH_TEMPLATE
)
# https://s3.amazonaws.com/nyc-tlc/trip+data/green_tripdata_2021-01.csv
GREEN_TAXI_URL_TEMPLATE = URL_PREFIX + '/green_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
GREEN_TAXI_CSV_FILE_TEMPLATE = AIRFLOW_HOME + '/green_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
GREEN_TAXI_PARQUET_FILE_TEMPLATE = AIRFLOW_HOME + '/green_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet'
GREEN_TAXI_GCS_PATH_TEMPLATE = "raw/green_tripdata/{{ execution_date.strftime(\'%Y\') }}/green_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet"
green_taxi_data_dag = DAG(
dag_id="green_taxi_data_v1",
schedule_interval="0 7 2 * *",
start_date=datetime(2019, 1, 1),
default_args=default_args,
catchup=True,
max_active_runs=3,
tags=['dtc-de'],
)
download_parquetize_upload_dag(
dag=green_taxi_data_dag,
url_template=GREEN_TAXI_URL_TEMPLATE,
local_csv_path_template=GREEN_TAXI_CSV_FILE_TEMPLATE,
local_parquet_path_template=GREEN_TAXI_PARQUET_FILE_TEMPLATE,
gcs_path_template=GREEN_TAXI_GCS_PATH_TEMPLATE
)
# https://nyc-tlc.s3.amazonaws.com/trip+data/fhv_tripdata_2021-01.csv
FHV_TAXI_URL_TEMPLATE = URL_PREFIX + '/fhv_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
FHV_TAXI_CSV_FILE_TEMPLATE = AIRFLOW_HOME + '/fhv_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
FHV_TAXI_PARQUET_FILE_TEMPLATE = AIRFLOW_HOME + '/fhv_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet'
FHV_TAXI_GCS_PATH_TEMPLATE = "raw/fhv_tripdata/{{ execution_date.strftime(\'%Y\') }}/fhv_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet"
fhv_taxi_data_dag = DAG(
dag_id="hfv_taxi_data_v1",
schedule_interval="0 8 2 * *",
start_date=datetime(2019, 1, 1),
end_date=datetime(2020, 1, 1),
default_args=default_args,
catchup=True,
max_active_runs=3,
tags=['dtc-de'],
)
download_parquetize_upload_dag(
dag=fhv_taxi_data_dag,
url_template=FHV_TAXI_URL_TEMPLATE,
local_csv_path_template=FHV_TAXI_CSV_FILE_TEMPLATE,
local_parquet_path_template=FHV_TAXI_PARQUET_FILE_TEMPLATE,
gcs_path_template=FHV_TAXI_GCS_PATH_TEMPLATE
)
# https://s3.amazonaws.com/nyc-tlc/misc/taxi+_zone_lookup.csv
ZONES_URL_TEMPLATE = 'https://s3.amazonaws.com/nyc-tlc/misc/taxi+_zone_lookup.csv'
ZONES_CSV_FILE_TEMPLATE = AIRFLOW_HOME + '/taxi_zone_lookup.csv'
ZONES_PARQUET_FILE_TEMPLATE = AIRFLOW_HOME + '/taxi_zone_lookup.parquet'
ZONES_GCS_PATH_TEMPLATE = "raw/taxi_zone/taxi_zone_lookup.parquet"
zones_data_dag = DAG(
dag_id="zones_data_v1",
schedule_interval="@once",
start_date=days_ago(1),
default_args=default_args,
catchup=True,
max_active_runs=3,
tags=['dtc-de'],
)
download_parquetize_upload_dag(
dag=zones_data_dag,
url_template=ZONES_URL_TEMPLATE,
local_csv_path_template=ZONES_CSV_FILE_TEMPLATE,
local_parquet_path_template=ZONES_PARQUET_FILE_TEMPLATE,
gcs_path_template=ZONES_GCS_PATH_TEMPLATE
) | 32.393617 | 156 | 0.665127 |
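The yellow, green, and fhv DAG declarations above differ only in their name, schedule, and path templates. One way to cut the repetition is to drive the same factory from a config dict and register each DAG in the module namespace, which is how Airflow's parser discovers module-level DAG objects; a sketch (the DATASETS mapping is illustrative, and the fhv end_date is omitted for brevity):

DATASETS = {
    'yellow': {'schedule': '0 6 2 * *'},
    'green': {'schedule': '0 7 2 * *'},
    'fhv': {'schedule': '0 8 2 * *'},
}
for name, cfg in DATASETS.items():
    generated_dag = DAG(
        dag_id=f"{name}_taxi_data_generated",
        schedule_interval=cfg['schedule'],
        start_date=datetime(2019, 1, 1),
        default_args=default_args,
        catchup=True,
        max_active_runs=3,
        tags=['dtc-de'],
    )
    download_parquetize_upload_dag(
        dag=generated_dag,
        url_template=URL_PREFIX + '/' + name + '_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv',
        local_csv_path_template=AIRFLOW_HOME + '/' + name + '_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv',
        local_parquet_path_template=AIRFLOW_HOME + '/' + name + '_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet',
        gcs_path_template='raw/' + name + '_tripdata/{{ execution_date.strftime(\'%Y\') }}/' + name + '_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet',
    )
    # Register the DAG in globals() so the Airflow DAG parser picks it up.
    globals()[generated_dag.dag_id] = generated_dag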
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import os
import logging
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.providers.google.cloud.operators.bigquery import BigQueryCreateExternalTableOperator, BigQueryInsertJobOperator
from airflow.providers.google.cloud.transfers.gcs_to_gcs import GCSToGCSOperator
PROJECT_ID = os.environ.get("GCP_PROJECT_ID")
BUCKET = os.environ.get("GCP_GCS_BUCKET")
path_to_local_home = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")
BIGQUERY_DATASET = os.environ.get("BIGQUERY_DATASET", 'trips_data_all')
DATASET = "tripdata"
COLOUR_RANGE = {'yellow': 'tpep_pickup_datetime', 'green': 'lpep_pickup_datetime'}
INPUT_PART = "raw"
INPUT_FILETYPE = "parquet"
default_args = {
"owner": "airflow",
"start_date": days_ago(1),
"depends_on_past": False,
"retries": 1,
}
# NOTE: DAG declaration - using a Context Manager (an implicit way)
with DAG(
dag_id="gcs_2_bq_dag",
schedule_interval="@daily",
default_args=default_args,
catchup=False,
max_active_runs=1,
tags=['dtc-de'],
) as dag:
for colour, ds_col in COLOUR_RANGE.items():
move_files_gcs_task = GCSToGCSOperator(
task_id=f'move_{colour}_{DATASET}_files_task',
source_bucket=BUCKET,
source_object=f'{INPUT_PART}/{colour}_{DATASET}*.{INPUT_FILETYPE}',
destination_bucket=BUCKET,
destination_object=f'{colour}/{colour}_{DATASET}',
move_object=True
)
bigquery_external_table_task = BigQueryCreateExternalTableOperator(
task_id=f"bq_{colour}_{DATASET}_external_table_task",
table_resource={
"tableReference": {
"projectId": PROJECT_ID,
"datasetId": BIGQUERY_DATASET,
"tableId": f"{colour}_{DATASET}_external_table",
},
"externalDataConfiguration": {
"autodetect": "True",
"sourceFormat": f"{INPUT_FILETYPE.upper()}",
"sourceUris": [f"gs://{BUCKET}/{colour}/*"],
},
},
)
CREATE_BQ_TBL_QUERY = (
f"CREATE OR REPLACE TABLE {BIGQUERY_DATASET}.{colour}_{DATASET} \
PARTITION BY DATE({ds_col}) \
AS \
SELECT * FROM {BIGQUERY_DATASET}.{colour}_{DATASET}_external_table;"
)
# Create a partitioned table from external table
bq_create_partitioned_table_job = BigQueryInsertJobOperator(
task_id=f"bq_create_{colour}_{DATASET}_partitioned_table_task",
configuration={
"query": {
"query": CREATE_BQ_TBL_QUERY,
"useLegacySql": False,
}
}
)
move_files_gcs_task >> bigquery_external_table_task >> bq_create_partitioned_table_job
| 33.890244 | 124 | 0.58951 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from confluent_kafka import Producer
import argparse
import csv
from typing import Dict
from time import sleep
from settings import CONFLUENT_CLOUD_CONFIG, \
GREEN_TAXI_TOPIC, FHV_TAXI_TOPIC, \
GREEN_TRIP_DATA_PATH, FHV_TRIP_DATA_PATH
class RideCSVProducer:
def __init__(self, props: Dict, ride_type: str):
self.producer = Producer(**props)
self.ride_type = ride_type
def parse_row(self, row):
if self.ride_type == 'green':
record = f'{row[5]}, {row[6]}' # PULocationID, DOLocationID
key = str(row[0]) # vendor_id
elif self.ride_type == 'fhv':
record = f'{row[3]}, {row[4]}' # PULocationID, DOLocationID,
key = str(row[0]) # dispatching_base_num
return key, record
def read_records(self, resource_path: str):
records, ride_keys = [], []
with open(resource_path, 'r') as f:
reader = csv.reader(f)
header = next(reader) # skip the header
for row in reader:
key, record = self.parse_row(row)
ride_keys.append(key)
records.append(record)
return zip(ride_keys, records)
def publish(self, records: [str, str], topic: str):
for key_value in records:
key, value = key_value
try:
self.producer.poll(0)
self.producer.produce(topic=topic, key=key, value=value)
print(f"Producing record for <key: {key}, value:{value}>")
except KeyboardInterrupt:
break
except BufferError as bfer:
self.producer.poll(0.1)
except Exception as e:
print(f"Exception while producing record - {value}: {e}")
self.producer.flush()
sleep(10)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Kafka Consumer')
parser.add_argument('--type', type=str, default='green')
args = parser.parse_args()
if args.type == 'green':
kafka_topic = GREEN_TAXI_TOPIC
data_path = GREEN_TRIP_DATA_PATH
elif args.type == 'fhv':
kafka_topic = FHV_TAXI_TOPIC
data_path = FHV_TRIP_DATA_PATH
producer = RideCSVProducer(ride_type=args.type, props=CONFLUENT_CLOUD_CONFIG)
ride_records = producer.read_records(resource_path=data_path)
producer.publish(records=ride_records, topic=kafka_topic)
| 32.819444 | 81 | 0.587921 |
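The settings module imported by the producer above is not included in this dump. A minimal sketch of what it could contain, with placeholder broker, credential, topic, and path values (the config keys follow the standard confluent-kafka / librdkafka client properties, which is also what the Spark job further down reads):

# settings.py (sketch; all values are placeholders)
CONFLUENT_CLOUD_CONFIG = {
    'bootstrap.servers': '<cluster>.confluent.cloud:9092',
    'security.protocol': 'SASL_SSL',
    'sasl.mechanisms': 'PLAIN',
    'sasl.username': '<api-key>',
    'sasl.password': '<api-secret>',
}
GREEN_TAXI_TOPIC = 'rides_green'
FHV_TAXI_TOPIC = 'rides_fhv'
GREEN_TRIP_DATA_PATH = '../resources/green_tripdata_2019-01.csv'
FHV_TRIP_DATA_PATH = '../resources/fhv_tripdata_2019-01.csv'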
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import pyspark.sql.types as T
INPUT_DATA_PATH = '../../resources/rides.csv'
BOOTSTRAP_SERVERS = 'localhost:9092'
TOPIC_WINDOWED_VENDOR_ID_COUNT = 'vendor_counts_windowed'
PRODUCE_TOPIC_RIDES_CSV = CONSUME_TOPIC_RIDES_CSV = 'rides_csv'
RIDE_SCHEMA = T.StructType(
[T.StructField("vendor_id", T.IntegerType()),
T.StructField('tpep_pickup_datetime', T.TimestampType()),
T.StructField('tpep_dropoff_datetime', T.TimestampType()),
T.StructField("passenger_count", T.IntegerType()),
T.StructField("trip_distance", T.FloatType()),
T.StructField("payment_type", T.IntegerType()),
T.StructField("total_amount", T.FloatType()),
])
| 34 | 63 | 0.691265 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from settings import CONFLUENT_CLOUD_CONFIG, GREEN_TAXI_TOPIC, FHV_TAXI_TOPIC, RIDES_TOPIC, ALL_RIDE_SCHEMA
def read_from_kafka(consume_topic: str):
# Spark Streaming DataFrame, connected to the Kafka topic via the brokers in the bootstrap.servers option
df_stream = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", CONFLUENT_CLOUD_CONFIG['bootstrap.servers']) \
.option("subscribe", consume_topic) \
.option("startingOffsets", "earliest") \
.option("checkpointLocation", "checkpoint") \
.option("kafka.security.protocol", "SASL_SSL") \
.option("kafka.sasl.mechanism", "PLAIN") \
.option("kafka.sasl.jaas.config",
f"""org.apache.kafka.common.security.plain.PlainLoginModule required username="{CONFLUENT_CLOUD_CONFIG['sasl.username']}" password="{CONFLUENT_CLOUD_CONFIG['sasl.password']}";""") \
.option("failOnDataLoss", False) \
.load()
return df_stream
def parse_rides(df, schema):
""" take a Spark Streaming df and parse value col based on <schema>, return streaming df cols in schema """
assert df.isStreaming is True, "DataFrame doesn't receive streaming data"
df = df.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
# split attributes to nested array in one Column
col = F.split(df['value'], ', ')
# expand col to multiple top-level columns
for idx, field in enumerate(schema):
df = df.withColumn(field.name, col.getItem(idx).cast(field.dataType))
df = df.na.drop()
df.printSchema()
return df.select([field.name for field in schema])
def sink_console(df, output_mode: str = 'complete', processing_time: str = '5 seconds'):
query = df.writeStream \
.outputMode(output_mode) \
.trigger(processingTime=processing_time) \
.format("console") \
.option("truncate", False) \
.start() \
.awaitTermination()
return query # pyspark.sql.streaming.StreamingQuery
def sink_kafka(df, topic, output_mode: str = 'complete'):
query = df.writeStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092") \
.outputMode(output_mode) \
.option("topic", topic) \
.option("checkpointLocation", "checkpoint") \
.option("kafka.security.protocol", "SASL_SSL") \
.option("kafka.sasl.mechanism", "PLAIN") \
.option("kafka.sasl.jaas.config",
f"""org.apache.kafka.common.security.plain.PlainLoginModule required username="{CONFLUENT_CLOUD_CONFIG['sasl.username']}" password="{CONFLUENT_CLOUD_CONFIG['sasl.password']}";""") \
.option("failOnDataLoss", False) \
.start()
return query
def op_groupby(df, column_names):
df_aggregation = df.groupBy(column_names).count()
return df_aggregation
if __name__ == "__main__":
spark = SparkSession.builder.appName('streaming-homework').getOrCreate()
spark.sparkContext.setLogLevel('WARN')
# Step 1: Consume GREEN_TAXI_TOPIC and FHV_TAXI_TOPIC
df_green_rides = read_from_kafka(consume_topic=GREEN_TAXI_TOPIC)
df_fhv_rides = read_from_kafka(consume_topic=FHV_TAXI_TOPIC)
# Step 2: Publish green and fhv rides to RIDES_TOPIC
kafka_sink_green_query = sink_kafka(df=df_green_rides, topic=RIDES_TOPIC, output_mode='append')
kafka_sink_fhv_query = sink_kafka(df=df_fhv_rides, topic=RIDES_TOPIC, output_mode='append')
# Step 3: Read RIDES_TOPIC and parse it in ALL_RIDE_SCHEMA
df_all_rides = read_from_kafka(consume_topic=RIDES_TOPIC)
df_all_rides = parse_rides(df_all_rides, ALL_RIDE_SCHEMA)
# Step 4: Apply Aggregation on the all_rides
df_pu_location_count = op_groupby(df_all_rides, ['PULocationID'])
df_pu_location_count = df_pu_location_count.sort(F.col('count').desc())
# Step 5: Sink Aggregation Streams to Console
console_sink_pu_location = sink_console(df_pu_location_count, output_mode='complete')
| 39.87 | 197 | 0.666911 |
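RIDES_TOPIC and ALL_RIDE_SCHEMA also come from that settings module. Since the producer earlier in this table emits values of the form "PULocationID, DOLocationID" and parse_rides splits on ', ', a plausible sketch is the following (the topic name is an assumption):

import pyspark.sql.types as T
RIDES_TOPIC = 'all_rides'
# Field order must match the ', '-separated values produced above.
ALL_RIDE_SCHEMA = T.StructType([
    T.StructField('PULocationID', T.IntegerType()),
    T.StructField('DOLocationID', T.IntegerType()),
])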
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | #!/usr/bin/env python
# coding: utf-8
import os
import argparse
from time import time
import pandas as pd
from sqlalchemy import create_engine
def main(params):
user = params.user
password = params.password
host = params.host
port = params.port
db = params.db
table_name = params.table_name
url = params.url
# the backup files are gzipped, and it's important to keep the correct extension
# for pandas to be able to open the file
if url.endswith('.csv.gz'):
csv_name = 'output.csv.gz'
else:
csv_name = 'output.csv'
os.system(f"wget {url} -O {csv_name}")
engine = create_engine(f'postgresql://{user}:{password}@{host}:{port}/{db}')
df_iter = pd.read_csv(csv_name, iterator=True, chunksize=100000)
df = next(df_iter)
df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)
df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime)
df.head(n=0).to_sql(name=table_name, con=engine, if_exists='replace')
df.to_sql(name=table_name, con=engine, if_exists='append')
while True:
try:
t_start = time()
df = next(df_iter)
df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)
df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime)
df.to_sql(name=table_name, con=engine, if_exists='append')
t_end = time()
print('inserted another chunk, took %.3f seconds' % (t_end - t_start))
except StopIteration:
print("Finished ingesting data into the postgres database")
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Ingest CSV data to Postgres')
parser.add_argument('--user', required=True, help='user name for postgres')
parser.add_argument('--password', required=True, help='password for postgres')
parser.add_argument('--host', required=True, help='host for postgres')
parser.add_argument('--port', required=True, help='port for postgres')
parser.add_argument('--db', required=True, help='database name for postgres')
parser.add_argument('--table_name', required=True, help='name of the table where we will write the results to')
parser.add_argument('--url', required=True, help='url of the csv file')
args = parser.parse_args()
main(args)
| 29.417722 | 115 | 0.642798 |
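Everything the script above needs comes in through argparse flags; an example invocation (connection values and URL are placeholders) would be: python ingest_data.py --user=root --password=root --host=localhost --port=5432 --db=ny_taxi --table_name=yellow_taxi_trips --url=<csv_url>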
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import sys
import pandas as pd
print(sys.argv)
day = sys.argv[1]
# some fancy stuff with pandas
print(f'job finished successfully for day = {day}') | 12.909091 | 51 | 0.723684 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import io
import os
import requests
import pandas as pd
from google.cloud import storage
"""
Pre-reqs:
1. `pip install pandas pyarrow google-cloud-storage`
2. Set GOOGLE_APPLICATION_CREDENTIALS to your project/service-account key
3. Set GCP_GCS_BUCKET as your bucket or change default value of BUCKET
"""
# services = ['fhv','green','yellow']
init_url = 'https://github.com/DataTalksClub/nyc-tlc-data/releases/download/'
# switch out the bucketname
BUCKET = os.environ.get("GCP_GCS_BUCKET", "dtc-data-lake-bucketname")
def upload_to_gcs(bucket, object_name, local_file):
"""
Ref: https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
"""
# # WORKAROUND to prevent timeout for files > 6 MB on 800 kbps upload speed.
# # (Ref: https://github.com/googleapis/python-storage/issues/74)
# storage.blob._MAX_MULTIPART_SIZE = 5 * 1024 * 1024 # 5 MB
# storage.blob._DEFAULT_CHUNKSIZE = 5 * 1024 * 1024 # 5 MB
client = storage.Client()
bucket = client.bucket(bucket)
blob = bucket.blob(object_name)
blob.upload_from_filename(local_file)
def web_to_gcs(year, service):
for i in range(12):
# sets the month part of the file_name string
month = '0'+str(i+1)
month = month[-2:]
# csv file_name
file_name = f"{service}_tripdata_{year}-{month}.csv.gz"
# download it using requests via a pandas df
request_url = f"{init_url}{service}/{file_name}"
r = requests.get(request_url)
open(file_name, 'wb').write(r.content)
print(f"Local: {file_name}")
# read it back into a parquet file
df = pd.read_csv(file_name, compression='gzip')
file_name = file_name.replace('.csv.gz', '.parquet')
df.to_parquet(file_name, engine='pyarrow')
print(f"Parquet: {file_name}")
# upload it to gcs
upload_to_gcs(BUCKET, f"{service}/{file_name}", file_name)
print(f"GCS: {service}/{file_name}")
web_to_gcs('2019', 'green')
web_to_gcs('2020', 'green')
# web_to_gcs('2019', 'yellow')
# web_to_gcs('2020', 'yellow')
| 30.671642 | 93 | 0.642621 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | #!/usr/bin/env python
# coding: utf-8
import argparse
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
parser = argparse.ArgumentParser()
parser.add_argument('--input_green', required=True)
parser.add_argument('--input_yellow', required=True)
parser.add_argument('--output', required=True)
args = parser.parse_args()
input_green = args.input_green
input_yellow = args.input_yellow
output = args.output
spark = SparkSession.builder \
.appName('test') \
.getOrCreate()
df_green = spark.read.parquet(input_green)
df_green = df_green \
.withColumnRenamed('lpep_pickup_datetime', 'pickup_datetime') \
.withColumnRenamed('lpep_dropoff_datetime', 'dropoff_datetime')
df_yellow = spark.read.parquet(input_yellow)
df_yellow = df_yellow \
.withColumnRenamed('tpep_pickup_datetime', 'pickup_datetime') \
.withColumnRenamed('tpep_dropoff_datetime', 'dropoff_datetime')
common_columns = [
'VendorID',
'pickup_datetime',
'dropoff_datetime',
'store_and_fwd_flag',
'RatecodeID',
'PULocationID',
'DOLocationID',
'passenger_count',
'trip_distance',
'fare_amount',
'extra',
'mta_tax',
'tip_amount',
'tolls_amount',
'improvement_surcharge',
'total_amount',
'payment_type',
'congestion_surcharge'
]
df_green_sel = df_green \
.select(common_columns) \
.withColumn('service_type', F.lit('green'))
df_yellow_sel = df_yellow \
.select(common_columns) \
.withColumn('service_type', F.lit('yellow'))
df_trips_data = df_green_sel.unionAll(df_yellow_sel)
df_trips_data.registerTempTable('trips_data')
df_result = spark.sql("""
SELECT
-- Revenue grouping
PULocationID AS revenue_zone,
date_trunc('month', pickup_datetime) AS revenue_month,
service_type,
-- Revenue calculation
SUM(fare_amount) AS revenue_monthly_fare,
SUM(extra) AS revenue_monthly_extra,
SUM(mta_tax) AS revenue_monthly_mta_tax,
SUM(tip_amount) AS revenue_monthly_tip_amount,
SUM(tolls_amount) AS revenue_monthly_tolls_amount,
SUM(improvement_surcharge) AS revenue_monthly_improvement_surcharge,
SUM(total_amount) AS revenue_monthly_total_amount,
SUM(congestion_surcharge) AS revenue_monthly_congestion_surcharge,
-- Additional calculations
AVG(passenger_count) AS avg_monthly_passenger_count,
AVG(trip_distance) AS avg_monthly_trip_distance
FROM
trips_data
GROUP BY
1, 2, 3
""")
df_result.coalesce(1) \
.write.parquet(output, mode='overwrite')
| 21.75 | 72 | 0.690224 |
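The batch job above is driven by the same argparse pattern; a typical local run (script name, master, and paths are placeholders) would be: spark-submit --master "local[*]" spark_sql_job.py --input_green=data/pq/green/* --input_yellow=data/pq/yellow/* --output=data/report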
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | #!/usr/bin/env python
# coding: utf-8
import argparse
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
parser = argparse.ArgumentParser()
parser.add_argument('--input_green', required=True)
parser.add_argument('--input_yellow', required=True)
parser.add_argument('--output', required=True)
args = parser.parse_args()
input_green = args.input_green
input_yellow = args.input_yellow
output = args.output
spark = SparkSession.builder \
.appName('test') \
.getOrCreate()
spark.conf.set('temporaryGcsBucket', 'dataproc-temp-europe-west6-828225226997-fckhkym8')
df_green = spark.read.parquet(input_green)
df_green = df_green \
.withColumnRenamed('lpep_pickup_datetime', 'pickup_datetime') \
.withColumnRenamed('lpep_dropoff_datetime', 'dropoff_datetime')
df_yellow = spark.read.parquet(input_yellow)
df_yellow = df_yellow \
.withColumnRenamed('tpep_pickup_datetime', 'pickup_datetime') \
.withColumnRenamed('tpep_dropoff_datetime', 'dropoff_datetime')
common_columns = [
'VendorID',
'pickup_datetime',
'dropoff_datetime',
'store_and_fwd_flag',
'RatecodeID',
'PULocationID',
'DOLocationID',
'passenger_count',
'trip_distance',
'fare_amount',
'extra',
'mta_tax',
'tip_amount',
'tolls_amount',
'improvement_surcharge',
'total_amount',
'payment_type',
'congestion_surcharge'
]
df_green_sel = df_green \
.select(common_columns) \
.withColumn('service_type', F.lit('green'))
df_yellow_sel = df_yellow \
.select(common_columns) \
.withColumn('service_type', F.lit('yellow'))
df_trips_data = df_green_sel.unionAll(df_yellow_sel)
df_trips_data.registerTempTable('trips_data')
df_result = spark.sql("""
SELECT
-- Revenue grouping
PULocationID AS revenue_zone,
date_trunc('month', pickup_datetime) AS revenue_month,
service_type,
-- Revenue calculation
SUM(fare_amount) AS revenue_monthly_fare,
SUM(extra) AS revenue_monthly_extra,
SUM(mta_tax) AS revenue_monthly_mta_tax,
SUM(tip_amount) AS revenue_monthly_tip_amount,
SUM(tolls_amount) AS revenue_monthly_tolls_amount,
SUM(improvement_surcharge) AS revenue_monthly_improvement_surcharge,
SUM(total_amount) AS revenue_monthly_total_amount,
SUM(congestion_surcharge) AS revenue_monthly_congestion_surcharge,
-- Additional calculations
AVG(passenger_count) AS avg_monthly_passenger_count,
AVG(trip_distance) AS avg_monthly_trip_distance
FROM
trips_data
GROUP BY
1, 2, 3
""")
df_result.write.format('bigquery') \
.option('table', output) \
.save()
| 22.069565 | 88 | 0.690422 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import argparse
from typing import Dict, List
from kafka import KafkaConsumer
from settings import BOOTSTRAP_SERVERS, CONSUME_TOPIC_RIDES_CSV
class RideCSVConsumer:
def __init__(self, props: Dict):
self.consumer = KafkaConsumer(**props)
def consume_from_kafka(self, topics: List[str]):
self.consumer.subscribe(topics=topics)
print('Consuming from Kafka started')
print('Available topics to consume: ', self.consumer.subscription())
while True:
try:
# SIGINT can't be handled when polling, limit timeout to 1 second.
msg = self.consumer.poll(1.0)
if msg is None or msg == {}:
continue
for msg_key, msg_values in msg.items():
for msg_val in msg_values:
print(f'Key:{msg_val.key}-type({type(msg_val.key)}), '
f'Value:{msg_val.value}-type({type(msg_val.value)})')
except KeyboardInterrupt:
break
self.consumer.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Kafka Consumer')
parser.add_argument('--topic', type=str, default=CONSUME_TOPIC_RIDES_CSV)
args = parser.parse_args()
topic = args.topic
config = {
'bootstrap_servers': [BOOTSTRAP_SERVERS],
'auto_offset_reset': 'earliest',
'enable_auto_commit': True,
'key_deserializer': lambda key: int(key.decode('utf-8')),
'value_deserializer': lambda value: value.decode('utf-8'),
'group_id': 'consumer.group.id.csv-example.1',
}
csv_consumer = RideCSVConsumer(props=config)
csv_consumer.consume_from_kafka(topics=[topic])
| 35.25 | 83 | 0.59632 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import csv
from time import sleep
from typing import Dict
from kafka import KafkaProducer
from settings import BOOTSTRAP_SERVERS, INPUT_DATA_PATH, PRODUCE_TOPIC_RIDES_CSV
def delivery_report(err, msg):
if err is not None:
print("Delivery failed for record {}: {}".format(msg.key(), err))
return
print('Record {} successfully produced to {} [{}] at offset {}'.format(
msg.key(), msg.topic(), msg.partition(), msg.offset()))
class RideCSVProducer:
def __init__(self, props: Dict):
self.producer = KafkaProducer(**props)
# self.producer = Producer(producer_props)
@staticmethod
def read_records(resource_path: str):
records, ride_keys = [], []
i = 0
with open(resource_path, 'r') as f:
reader = csv.reader(f)
header = next(reader) # skip the header
for row in reader:
# vendor_id, passenger_count, trip_distance, payment_type, total_amount
records.append(f'{row[0]}, {row[1]}, {row[2]}, {row[3]}, {row[4]}, {row[9]}, {row[16]}')
ride_keys.append(str(row[0]))
i += 1
if i == 5:
break
return zip(ride_keys, records)
def publish(self, topic: str, records: [str, str]):
for key_value in records:
key, value = key_value
try:
self.producer.send(topic=topic, key=key, value=value)
print(f"Producing record for <key: {key}, value:{value}>")
except KeyboardInterrupt:
break
except Exception as e:
print(f"Exception while producing record - {value}: {e}")
self.producer.flush()
sleep(1)
if __name__ == "__main__":
config = {
'bootstrap_servers': [BOOTSTRAP_SERVERS],
'key_serializer': lambda x: x.encode('utf-8'),
'value_serializer': lambda x: x.encode('utf-8')
}
producer = RideCSVProducer(props=config)
ride_records = producer.read_records(resource_path=INPUT_DATA_PATH)
print(ride_records)
producer.publish(topic=PRODUCE_TOPIC_RIDES_CSV, records=ride_records)
| 33.571429 | 104 | 0.574644 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from typing import List, Dict
class RideRecord:
def __init__(self, arr: List[str]):
self.vendor_id = int(arr[0])
self.passenger_count = int(arr[1])
self.trip_distance = float(arr[2])
self.payment_type = int(arr[3])
self.total_amount = float(arr[4])
@classmethod
def from_dict(cls, d: Dict):
return cls(arr=[
d['vendor_id'],
d['passenger_count'],
d['trip_distance'],
d['payment_type'],
d['total_amount']
]
)
def __repr__(self):
return f'{self.__class__.__name__}: {self.__dict__}'
def dict_to_ride_record(obj, ctx):
if obj is None:
return None
return RideRecord.from_dict(obj)
def ride_record_to_dict(ride_record: RideRecord, ctx):
return ride_record.__dict__
| 21.648649 | 60 | 0.540024 |
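The ctx argument in dict_to_ride_record and ride_record_to_dict matches the (obj, ctx) hook signature that confluent-kafka's schema-registry serializers use for their to_dict/from_dict callbacks. A sketch of wiring the class above into a JSON Schema serializer, assuming a local schema registry (the registry URL, schema string, and topic are placeholders):

from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka.schema_registry.json_schema import JSONSerializer
from confluent_kafka.serialization import SerializationContext, MessageField

schema_str = """
{
  "type": "object",
  "properties": {
    "vendor_id": {"type": "integer"},
    "passenger_count": {"type": "integer"},
    "trip_distance": {"type": "number"},
    "payment_type": {"type": "integer"},
    "total_amount": {"type": "number"}
  }
}
"""
registry_client = SchemaRegistryClient({'url': 'http://localhost:8081'})
# The serializer calls ride_record_to_dict(record, ctx) to turn a RideRecord into a plain dict.
serializer = JSONSerializer(schema_str, registry_client, to_dict=ride_record_to_dict)
record = RideRecord(arr=['1', '2', '3.5', '1', '18.2'])
payload = serializer(record, SerializationContext('rides_json', MessageField.VALUE))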
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from typing import Dict
class RideRecordKey:
def __init__(self, vendor_id):
self.vendor_id = vendor_id
@classmethod
def from_dict(cls, d: Dict):
return cls(vendor_id=d['vendor_id'])
def __repr__(self):
return f'{self.__class__.__name__}: {self.__dict__}'
def dict_to_ride_record_key(obj, ctx):
if obj is None:
return None
return RideRecordKey.from_dict(obj)
def ride_record_key_to_dict(ride_record_key: RideRecordKey, ctx):
return ride_record_key.__dict__
| 20.04 | 65 | 0.619048 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from typing import List, Dict
from decimal import Decimal
from datetime import datetime
class Ride:
def __init__(self, arr: List[str]):
self.vendor_id = arr[0]
self.tpep_pickup_datetime = datetime.strptime(arr[1], "%Y-%m-%d %H:%M:%S")
self.tpep_dropoff_datetime = datetime.strptime(arr[2], "%Y-%m-%d %H:%M:%S")
self.passenger_count = int(arr[3])
self.trip_distance = Decimal(arr[4])
self.rate_code_id = int(arr[5])
self.store_and_fwd_flag = arr[6]
self.pu_location_id = int(arr[7])
self.do_location_id = int(arr[8])
self.payment_type = arr[9]
self.fare_amount = Decimal(arr[10])
self.extra = Decimal(arr[11])
self.mta_tax = Decimal(arr[12])
self.tip_amount = Decimal(arr[13])
self.tolls_amount = Decimal(arr[14])
self.improvement_surcharge = Decimal(arr[15])
self.total_amount = Decimal(arr[16])
self.congestion_surcharge = Decimal(arr[17])
@classmethod
def from_dict(cls, d: Dict):
return cls(arr=[
d['vendor_id'],
d['tpep_pickup_datetime'],
d['tpep_dropoff_datetime'],
d['passenger_count'],
d['trip_distance'],
d['rate_code_id'],
d['store_and_fwd_flag'],
d['pu_location_id'],
d['do_location_id'],
d['payment_type'],
d['fare_amount'],
d['extra'],
d['mta_tax'],
d['tip_amount'],
d['tolls_amount'],
d['improvement_surcharge'],
d['total_amount'],
d['congestion_surcharge'],
]
)
def __repr__(self):
return f'{self.__class__.__name__}: {self.__dict__}'
| 32.396226 | 84 | 0.520068 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import faust
from taxi_rides import TaxiRide
from faust import current_event
app = faust.App('datatalksclub.stream.v3', broker='kafka://localhost:9092', consumer_auto_offset_reset="earliest")
topic = app.topic('datatalkclub.yellow_taxi_ride.json', value_type=TaxiRide)
high_amount_rides = app.topic('datatalks.yellow_taxi_rides.high_amount')
low_amount_rides = app.topic('datatalks.yellow_taxi_rides.low_amount')
@app.agent(topic)
async def process(stream):
async for event in stream:
if event.total_amount >= 40.0:
await current_event().forward(high_amount_rides)
else:
await current_event().forward(low_amount_rides)
if __name__ == '__main__':
app.main()
| 31.318182 | 114 | 0.701408 |
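Because each of these faust scripts calls app.main() under __main__, they are started as workers from the command line, for example (script name is a placeholder): python stream.py worker -l info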
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import csv
from json import dumps
from kafka import KafkaProducer
from time import sleep
producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
key_serializer=lambda x: dumps(x).encode('utf-8'),
value_serializer=lambda x: dumps(x).encode('utf-8'))
file = open('../../resources/rides.csv')
csvreader = csv.reader(file)
header = next(csvreader)
for row in csvreader:
key = {"vendorId": int(row[0])}
value = {"vendorId": int(row[0]), "passenger_count": int(row[3]), "trip_distance": float(row[4]), "payment_type": int(row[9]), "total_amount": float(row[16])}
producer.send('datatalkclub.yellow_taxi_ride.json', value=value, key=key)
print("producing")
sleep(1) | 36 | 162 | 0.648173 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import faust
from taxi_rides import TaxiRide
app = faust.App('datatalksclub.stream.v2', broker='kafka://localhost:9092')
topic = app.topic('datatalkclub.yellow_taxi_ride.json', value_type=TaxiRide)
@app.agent(topic)
async def start_reading(records):
async for record in records:
print(record)
if __name__ == '__main__':
app.main()
| 19.823529 | 76 | 0.694051 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import faust
from taxi_rides import TaxiRide
app = faust.App('datatalksclub.stream.v2', broker='kafka://localhost:9092')
topic = app.topic('datatalkclub.yellow_taxi_ride.json', value_type=TaxiRide)
vendor_rides = app.Table('vendor_rides', default=int)
@app.agent(topic)
async def process(stream):
async for event in stream.group_by(TaxiRide.vendorId):
vendor_rides[event.vendorId] += 1
if __name__ == '__main__':
app.main()
| 23.833333 | 76 | 0.704036 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import faust
class TaxiRide(faust.Record, validation=True):
vendorId: str
passenger_count: int
trip_distance: float
payment_type: int
total_amount: float
| 16.7 | 46 | 0.704545 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from datetime import timedelta
import faust
from taxi_rides import TaxiRide
app = faust.App('datatalksclub.stream.v2', broker='kafka://localhost:9092')
topic = app.topic('datatalkclub.yellow_taxi_ride.json', value_type=TaxiRide)
vendor_rides = app.Table('vendor_rides_windowed', default=int).tumbling(
timedelta(minutes=1),
expires=timedelta(hours=1),
)
@app.agent(topic)
async def process(stream):
async for event in stream.group_by(TaxiRide.vendorId):
vendor_rides[event.vendorId] += 1
if __name__ == '__main__':
app.main()
| 23.26087 | 76 | 0.710952 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from settings import RIDE_SCHEMA, CONSUME_TOPIC_RIDES_CSV, TOPIC_WINDOWED_VENDOR_ID_COUNT
def read_from_kafka(consume_topic: str):
# Spark Streaming DataFrame, connected to the Kafka topic via the brokers in the bootstrap.servers option
df_stream = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "localhost:9092,broker:29092") \
.option("subscribe", consume_topic) \
.option("startingOffsets", "earliest") \
.option("checkpointLocation", "checkpoint") \
.load()
return df_stream
def parse_ride_from_kafka_message(df, schema):
""" take a Spark Streaming df and parse value col based on <schema>, return streaming df cols in schema """
assert df.isStreaming is True, "DataFrame doesn't receive streaming data"
df = df.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
# split attributes to nested array in one Column
col = F.split(df['value'], ', ')
# expand col to multiple top-level columns
for idx, field in enumerate(schema):
df = df.withColumn(field.name, col.getItem(idx).cast(field.dataType))
return df.select([field.name for field in schema])
def sink_console(df, output_mode: str = 'complete', processing_time: str = '5 seconds'):
write_query = df.writeStream \
.outputMode(output_mode) \
.trigger(processingTime=processing_time) \
.format("console") \
.option("truncate", False) \
.start()
return write_query # pyspark.sql.streaming.StreamingQuery
def sink_memory(df, query_name, query_template):
query_df = df \
.writeStream \
.queryName(query_name) \
.format("memory") \
.start()
query_str = query_template.format(table_name=query_name)
query_results = spark.sql(query_str)
return query_results, query_df
def sink_kafka(df, topic):
write_query = df.writeStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "localhost:9092,broker:29092") \
.outputMode('complete') \
.option("topic", topic) \
.option("checkpointLocation", "checkpoint") \
.start()
return write_query
def prepare_df_to_kafka_sink(df, value_columns, key_column=None):
columns = df.columns
df = df.withColumn("value", F.concat_ws(', ', *value_columns))
if key_column:
df = df.withColumnRenamed(key_column, "key")
df = df.withColumn("key", df.key.cast('string'))
return df.select(['key', 'value'])
def op_groupby(df, column_names):
df_aggregation = df.groupBy(column_names).count()
return df_aggregation
def op_windowed_groupby(df, window_duration, slide_duration):
df_windowed_aggregation = df.groupBy(
F.window(timeColumn=df.tpep_pickup_datetime, windowDuration=window_duration, slideDuration=slide_duration),
df.vendor_id
).count()
return df_windowed_aggregation
if __name__ == "__main__":
spark = SparkSession.builder.appName('streaming-examples').getOrCreate()
spark.sparkContext.setLogLevel('WARN')
# read_streaming data
df_consume_stream = read_from_kafka(consume_topic=CONSUME_TOPIC_RIDES_CSV)
print(df_consume_stream.printSchema())
# parse streaming data
df_rides = parse_ride_from_kafka_message(df_consume_stream, RIDE_SCHEMA)
print(df_rides.printSchema())
sink_console(df_rides, output_mode='append')
df_trip_count_by_vendor_id = op_groupby(df_rides, ['vendor_id'])
df_trip_count_by_pickup_date_vendor_id = op_windowed_groupby(df_rides, window_duration="10 minutes",
slide_duration='5 minutes')
# write the output out to the console for debugging / testing
sink_console(df_trip_count_by_vendor_id)
# write the output to the kafka topic
df_trip_count_messages = prepare_df_to_kafka_sink(df=df_trip_count_by_pickup_date_vendor_id,
value_columns=['count'], key_column='vendor_id')
kafka_sink_query = sink_kafka(df=df_trip_count_messages, topic=TOPIC_WINDOWED_VENDOR_ID_COUNT)
spark.streams.awaitAnyTermination()
| 35.586207 | 115 | 0.65449 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | /**
* Autogenerated by Avro
*
* DO NOT EDIT DIRECTLY
*/
package schemaregistry;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.specific.SpecificData;
import org.apache.avro.util.Utf8;
import org.apache.avro.message.BinaryMessageEncoder;
import org.apache.avro.message.BinaryMessageDecoder;
import org.apache.avro.message.SchemaStore;
@org.apache.avro.specific.AvroGenerated
public class RideRecord extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
private static final long serialVersionUID = 6805437803204402942L;
public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"RideRecord\",\"namespace\":\"schemaregistry\",\"fields\":[{\"name\":\"vendor_id\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"passenger_count\",\"type\":\"int\"},{\"name\":\"trip_distance\",\"type\":\"double\"}]}");
public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
private static final SpecificData MODEL$ = new SpecificData();
private static final BinaryMessageEncoder<RideRecord> ENCODER =
new BinaryMessageEncoder<>(MODEL$, SCHEMA$);
private static final BinaryMessageDecoder<RideRecord> DECODER =
new BinaryMessageDecoder<>(MODEL$, SCHEMA$);
/**
* Return the BinaryMessageEncoder instance used by this class.
* @return the message encoder used by this class
*/
public static BinaryMessageEncoder<RideRecord> getEncoder() {
return ENCODER;
}
/**
* Return the BinaryMessageDecoder instance used by this class.
* @return the message decoder used by this class
*/
public static BinaryMessageDecoder<RideRecord> getDecoder() {
return DECODER;
}
/**
* Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}.
* @param resolver a {@link SchemaStore} used to find schemas by fingerprint
* @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore
*/
public static BinaryMessageDecoder<RideRecord> createDecoder(SchemaStore resolver) {
return new BinaryMessageDecoder<>(MODEL$, SCHEMA$, resolver);
}
/**
* Serializes this RideRecord to a ByteBuffer.
* @return a buffer holding the serialized data for this instance
* @throws java.io.IOException if this instance could not be serialized
*/
public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException {
return ENCODER.encode(this);
}
/**
* Deserializes a RideRecord from a ByteBuffer.
* @param b a byte buffer holding serialized data for an instance of this class
* @return a RideRecord instance decoded from the given buffer
* @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class
*/
public static RideRecord fromByteBuffer(
java.nio.ByteBuffer b) throws java.io.IOException {
return DECODER.decode(b);
}
private java.lang.String vendor_id;
private int passenger_count;
private double trip_distance;
/**
* Default constructor. Note that this does not initialize fields
* to their default values from the schema. If that is desired then
* one should use <code>newBuilder()</code>.
*/
public RideRecord() {}
/**
* All-args constructor.
* @param vendor_id The new value for vendor_id
* @param passenger_count The new value for passenger_count
* @param trip_distance The new value for trip_distance
*/
public RideRecord(java.lang.String vendor_id, java.lang.Integer passenger_count, java.lang.Double trip_distance) {
this.vendor_id = vendor_id;
this.passenger_count = passenger_count;
this.trip_distance = trip_distance;
}
@Override
public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; }
@Override
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
// Used by DatumWriter. Applications should not call.
@Override
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return vendor_id;
case 1: return passenger_count;
case 2: return trip_distance;
default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
}
}
// Used by DatumReader. Applications should not call.
@Override
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: vendor_id = value$ != null ? value$.toString() : null; break;
case 1: passenger_count = (java.lang.Integer)value$; break;
case 2: trip_distance = (java.lang.Double)value$; break;
default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
}
}
/**
* Gets the value of the 'vendor_id' field.
* @return The value of the 'vendor_id' field.
*/
public java.lang.String getVendorId() {
return vendor_id;
}
/**
* Sets the value of the 'vendor_id' field.
* @param value the value to set.
*/
public void setVendorId(java.lang.String value) {
this.vendor_id = value;
}
/**
* Gets the value of the 'passenger_count' field.
* @return The value of the 'passenger_count' field.
*/
public int getPassengerCount() {
return passenger_count;
}
/**
* Sets the value of the 'passenger_count' field.
* @param value the value to set.
*/
public void setPassengerCount(int value) {
this.passenger_count = value;
}
/**
* Gets the value of the 'trip_distance' field.
* @return The value of the 'trip_distance' field.
*/
public double getTripDistance() {
return trip_distance;
}
/**
* Sets the value of the 'trip_distance' field.
* @param value the value to set.
*/
public void setTripDistance(double value) {
this.trip_distance = value;
}
/**
* Creates a new RideRecord RecordBuilder.
* @return A new RideRecord RecordBuilder
*/
public static schemaregistry.RideRecord.Builder newBuilder() {
return new schemaregistry.RideRecord.Builder();
}
/**
* Creates a new RideRecord RecordBuilder by copying an existing Builder.
* @param other The existing builder to copy.
* @return A new RideRecord RecordBuilder
*/
public static schemaregistry.RideRecord.Builder newBuilder(schemaregistry.RideRecord.Builder other) {
if (other == null) {
return new schemaregistry.RideRecord.Builder();
} else {
return new schemaregistry.RideRecord.Builder(other);
}
}
/**
* Creates a new RideRecord RecordBuilder by copying an existing RideRecord instance.
* @param other The existing instance to copy.
* @return A new RideRecord RecordBuilder
*/
public static schemaregistry.RideRecord.Builder newBuilder(schemaregistry.RideRecord other) {
if (other == null) {
return new schemaregistry.RideRecord.Builder();
} else {
return new schemaregistry.RideRecord.Builder(other);
}
}
/**
* RecordBuilder for RideRecord instances.
*/
@org.apache.avro.specific.AvroGenerated
public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<RideRecord>
implements org.apache.avro.data.RecordBuilder<RideRecord> {
private java.lang.String vendor_id;
private int passenger_count;
private double trip_distance;
/** Creates a new Builder */
private Builder() {
super(SCHEMA$, MODEL$);
}
/**
* Creates a Builder by copying an existing Builder.
* @param other The existing Builder to copy.
*/
private Builder(schemaregistry.RideRecord.Builder other) {
super(other);
if (isValidValue(fields()[0], other.vendor_id)) {
this.vendor_id = data().deepCopy(fields()[0].schema(), other.vendor_id);
fieldSetFlags()[0] = other.fieldSetFlags()[0];
}
if (isValidValue(fields()[1], other.passenger_count)) {
this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count);
fieldSetFlags()[1] = other.fieldSetFlags()[1];
}
if (isValidValue(fields()[2], other.trip_distance)) {
this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance);
fieldSetFlags()[2] = other.fieldSetFlags()[2];
}
}
/**
* Creates a Builder by copying an existing RideRecord instance
* @param other The existing instance to copy.
*/
private Builder(schemaregistry.RideRecord other) {
super(SCHEMA$, MODEL$);
if (isValidValue(fields()[0], other.vendor_id)) {
this.vendor_id = data().deepCopy(fields()[0].schema(), other.vendor_id);
fieldSetFlags()[0] = true;
}
if (isValidValue(fields()[1], other.passenger_count)) {
this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count);
fieldSetFlags()[1] = true;
}
if (isValidValue(fields()[2], other.trip_distance)) {
this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance);
fieldSetFlags()[2] = true;
}
}
/**
* Gets the value of the 'vendor_id' field.
* @return The value.
*/
public java.lang.String getVendorId() {
return vendor_id;
}
/**
* Sets the value of the 'vendor_id' field.
* @param value The value of 'vendor_id'.
* @return This builder.
*/
public schemaregistry.RideRecord.Builder setVendorId(java.lang.String value) {
validate(fields()[0], value);
this.vendor_id = value;
fieldSetFlags()[0] = true;
return this;
}
/**
* Checks whether the 'vendor_id' field has been set.
* @return True if the 'vendor_id' field has been set, false otherwise.
*/
public boolean hasVendorId() {
return fieldSetFlags()[0];
}
/**
* Clears the value of the 'vendor_id' field.
* @return This builder.
*/
public schemaregistry.RideRecord.Builder clearVendorId() {
vendor_id = null;
fieldSetFlags()[0] = false;
return this;
}
/**
* Gets the value of the 'passenger_count' field.
* @return The value.
*/
public int getPassengerCount() {
return passenger_count;
}
/**
* Sets the value of the 'passenger_count' field.
* @param value The value of 'passenger_count'.
* @return This builder.
*/
public schemaregistry.RideRecord.Builder setPassengerCount(int value) {
validate(fields()[1], value);
this.passenger_count = value;
fieldSetFlags()[1] = true;
return this;
}
/**
* Checks whether the 'passenger_count' field has been set.
* @return True if the 'passenger_count' field has been set, false otherwise.
*/
public boolean hasPassengerCount() {
return fieldSetFlags()[1];
}
/**
* Clears the value of the 'passenger_count' field.
* @return This builder.
*/
public schemaregistry.RideRecord.Builder clearPassengerCount() {
fieldSetFlags()[1] = false;
return this;
}
/**
* Gets the value of the 'trip_distance' field.
* @return The value.
*/
public double getTripDistance() {
return trip_distance;
}
/**
* Sets the value of the 'trip_distance' field.
* @param value The value of 'trip_distance'.
* @return This builder.
*/
public schemaregistry.RideRecord.Builder setTripDistance(double value) {
validate(fields()[2], value);
this.trip_distance = value;
fieldSetFlags()[2] = true;
return this;
}
/**
* Checks whether the 'trip_distance' field has been set.
* @return True if the 'trip_distance' field has been set, false otherwise.
*/
public boolean hasTripDistance() {
return fieldSetFlags()[2];
}
/**
* Clears the value of the 'trip_distance' field.
* @return This builder.
*/
public schemaregistry.RideRecord.Builder clearTripDistance() {
fieldSetFlags()[2] = false;
return this;
}
@Override
@SuppressWarnings("unchecked")
public RideRecord build() {
try {
RideRecord record = new RideRecord();
record.vendor_id = fieldSetFlags()[0] ? this.vendor_id : (java.lang.String) defaultValue(fields()[0]);
record.passenger_count = fieldSetFlags()[1] ? this.passenger_count : (java.lang.Integer) defaultValue(fields()[1]);
record.trip_distance = fieldSetFlags()[2] ? this.trip_distance : (java.lang.Double) defaultValue(fields()[2]);
return record;
} catch (org.apache.avro.AvroMissingFieldException e) {
throw e;
} catch (java.lang.Exception e) {
throw new org.apache.avro.AvroRuntimeException(e);
}
}
}
@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumWriter<RideRecord>
WRITER$ = (org.apache.avro.io.DatumWriter<RideRecord>)MODEL$.createDatumWriter(SCHEMA$);
@Override public void writeExternal(java.io.ObjectOutput out)
throws java.io.IOException {
WRITER$.write(this, SpecificData.getEncoder(out));
}
@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumReader<RideRecord>
READER$ = (org.apache.avro.io.DatumReader<RideRecord>)MODEL$.createDatumReader(SCHEMA$);
@Override public void readExternal(java.io.ObjectInput in)
throws java.io.IOException {
READER$.read(this, SpecificData.getDecoder(in));
}
@Override protected boolean hasCustomCoders() { return true; }
@Override public void customEncode(org.apache.avro.io.Encoder out)
throws java.io.IOException
{
out.writeString(this.vendor_id);
out.writeInt(this.passenger_count);
out.writeDouble(this.trip_distance);
}
@Override public void customDecode(org.apache.avro.io.ResolvingDecoder in)
throws java.io.IOException
{
org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff();
if (fieldOrder == null) {
this.vendor_id = in.readString();
this.passenger_count = in.readInt();
this.trip_distance = in.readDouble();
} else {
for (int i = 0; i < 3; i++) {
switch (fieldOrder[i].pos()) {
case 0:
this.vendor_id = in.readString();
break;
case 1:
this.passenger_count = in.readInt();
break;
case 2:
this.trip_distance = in.readDouble();
break;
default:
throw new java.io.IOException("Corrupt ResolvingDecoder.");
}
}
}
}
}
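// ---------------------------------------------------------------------------
// Editor's sketch (not part of the generated file): minimal use of the builder
// API and the ByteBuffer round trip documented above. The class name
// RideRecordExample is hypothetical, and toByteBuffer() is assumed to mirror
// the fromByteBuffer() shown above, as in the other generated classes.
// ---------------------------------------------------------------------------
class RideRecordExample {
    public static void main(String[] args) throws java.io.IOException {
        RideRecord ride = RideRecord.newBuilder()
                .setVendorId("2")
                .setPassengerCount(1)
                .setTripDistance(3.5)
                .build();
        // Encode with the generated binary encoder, then decode it back.
        java.nio.ByteBuffer buf = ride.toByteBuffer();
        RideRecord decoded = RideRecord.fromByteBuffer(buf);
        System.out.println(decoded.getVendorId() + " -> " + decoded.getTripDistance());
    }
}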
| 29.533473 | 377 | 0.659381 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | /**
* Autogenerated by Avro
*
* DO NOT EDIT DIRECTLY
*/
package schemaregistry;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.specific.SpecificData;
import org.apache.avro.util.Utf8;
import org.apache.avro.message.BinaryMessageEncoder;
import org.apache.avro.message.BinaryMessageDecoder;
import org.apache.avro.message.SchemaStore;
@org.apache.avro.specific.AvroGenerated
public class RideRecordCompatible extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
private static final long serialVersionUID = 7163300507090021229L;
public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"RideRecordCompatible\",\"namespace\":\"schemaregistry\",\"fields\":[{\"name\":\"vendorId\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"passenger_count\",\"type\":\"int\"},{\"name\":\"trip_distance\",\"type\":\"double\"},{\"name\":\"pu_location_id\",\"type\":[\"null\",\"long\"],\"default\":null}]}");
public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
private static final SpecificData MODEL$ = new SpecificData();
private static final BinaryMessageEncoder<RideRecordCompatible> ENCODER =
new BinaryMessageEncoder<>(MODEL$, SCHEMA$);
private static final BinaryMessageDecoder<RideRecordCompatible> DECODER =
new BinaryMessageDecoder<>(MODEL$, SCHEMA$);
/**
* Return the BinaryMessageEncoder instance used by this class.
* @return the message encoder used by this class
*/
public static BinaryMessageEncoder<RideRecordCompatible> getEncoder() {
return ENCODER;
}
/**
* Return the BinaryMessageDecoder instance used by this class.
* @return the message decoder used by this class
*/
public static BinaryMessageDecoder<RideRecordCompatible> getDecoder() {
return DECODER;
}
/**
* Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}.
* @param resolver a {@link SchemaStore} used to find schemas by fingerprint
* @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore
*/
public static BinaryMessageDecoder<RideRecordCompatible> createDecoder(SchemaStore resolver) {
return new BinaryMessageDecoder<>(MODEL$, SCHEMA$, resolver);
}
/**
* Serializes this RideRecordCompatible to a ByteBuffer.
* @return a buffer holding the serialized data for this instance
* @throws java.io.IOException if this instance could not be serialized
*/
public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException {
return ENCODER.encode(this);
}
/**
* Deserializes a RideRecordCompatible from a ByteBuffer.
* @param b a byte buffer holding serialized data for an instance of this class
* @return a RideRecordCompatible instance decoded from the given buffer
* @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class
*/
public static RideRecordCompatible fromByteBuffer(
java.nio.ByteBuffer b) throws java.io.IOException {
return DECODER.decode(b);
}
private java.lang.String vendorId;
private int passenger_count;
private double trip_distance;
private java.lang.Long pu_location_id;
/**
* Default constructor. Note that this does not initialize fields
* to their default values from the schema. If that is desired then
* one should use <code>newBuilder()</code>.
*/
public RideRecordCompatible() {}
/**
* All-args constructor.
* @param vendorId The new value for vendorId
* @param passenger_count The new value for passenger_count
* @param trip_distance The new value for trip_distance
* @param pu_location_id The new value for pu_location_id
*/
public RideRecordCompatible(java.lang.String vendorId, java.lang.Integer passenger_count, java.lang.Double trip_distance, java.lang.Long pu_location_id) {
this.vendorId = vendorId;
this.passenger_count = passenger_count;
this.trip_distance = trip_distance;
this.pu_location_id = pu_location_id;
}
@Override
public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; }
@Override
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
// Used by DatumWriter. Applications should not call.
@Override
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return vendorId;
case 1: return passenger_count;
case 2: return trip_distance;
case 3: return pu_location_id;
default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
}
}
// Used by DatumReader. Applications should not call.
@Override
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: vendorId = value$ != null ? value$.toString() : null; break;
case 1: passenger_count = (java.lang.Integer)value$; break;
case 2: trip_distance = (java.lang.Double)value$; break;
case 3: pu_location_id = (java.lang.Long)value$; break;
default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
}
}
/**
* Gets the value of the 'vendorId' field.
* @return The value of the 'vendorId' field.
*/
public java.lang.String getVendorId() {
return vendorId;
}
/**
* Sets the value of the 'vendorId' field.
* @param value the value to set.
*/
public void setVendorId(java.lang.String value) {
this.vendorId = value;
}
/**
* Gets the value of the 'passenger_count' field.
* @return The value of the 'passenger_count' field.
*/
public int getPassengerCount() {
return passenger_count;
}
/**
* Sets the value of the 'passenger_count' field.
* @param value the value to set.
*/
public void setPassengerCount(int value) {
this.passenger_count = value;
}
/**
* Gets the value of the 'trip_distance' field.
* @return The value of the 'trip_distance' field.
*/
public double getTripDistance() {
return trip_distance;
}
/**
* Sets the value of the 'trip_distance' field.
* @param value the value to set.
*/
public void setTripDistance(double value) {
this.trip_distance = value;
}
/**
* Gets the value of the 'pu_location_id' field.
* @return The value of the 'pu_location_id' field.
*/
public java.lang.Long getPuLocationId() {
return pu_location_id;
}
/**
* Sets the value of the 'pu_location_id' field.
* @param value the value to set.
*/
public void setPuLocationId(java.lang.Long value) {
this.pu_location_id = value;
}
/**
* Creates a new RideRecordCompatible RecordBuilder.
* @return A new RideRecordCompatible RecordBuilder
*/
public static schemaregistry.RideRecordCompatible.Builder newBuilder() {
return new schemaregistry.RideRecordCompatible.Builder();
}
/**
* Creates a new RideRecordCompatible RecordBuilder by copying an existing Builder.
* @param other The existing builder to copy.
* @return A new RideRecordCompatible RecordBuilder
*/
public static schemaregistry.RideRecordCompatible.Builder newBuilder(schemaregistry.RideRecordCompatible.Builder other) {
if (other == null) {
return new schemaregistry.RideRecordCompatible.Builder();
} else {
return new schemaregistry.RideRecordCompatible.Builder(other);
}
}
/**
* Creates a new RideRecordCompatible RecordBuilder by copying an existing RideRecordCompatible instance.
* @param other The existing instance to copy.
* @return A new RideRecordCompatible RecordBuilder
*/
public static schemaregistry.RideRecordCompatible.Builder newBuilder(schemaregistry.RideRecordCompatible other) {
if (other == null) {
return new schemaregistry.RideRecordCompatible.Builder();
} else {
return new schemaregistry.RideRecordCompatible.Builder(other);
}
}
/**
* RecordBuilder for RideRecordCompatible instances.
*/
@org.apache.avro.specific.AvroGenerated
public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<RideRecordCompatible>
implements org.apache.avro.data.RecordBuilder<RideRecordCompatible> {
private java.lang.String vendorId;
private int passenger_count;
private double trip_distance;
private java.lang.Long pu_location_id;
/** Creates a new Builder */
private Builder() {
super(SCHEMA$, MODEL$);
}
/**
* Creates a Builder by copying an existing Builder.
* @param other The existing Builder to copy.
*/
private Builder(schemaregistry.RideRecordCompatible.Builder other) {
super(other);
if (isValidValue(fields()[0], other.vendorId)) {
this.vendorId = data().deepCopy(fields()[0].schema(), other.vendorId);
fieldSetFlags()[0] = other.fieldSetFlags()[0];
}
if (isValidValue(fields()[1], other.passenger_count)) {
this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count);
fieldSetFlags()[1] = other.fieldSetFlags()[1];
}
if (isValidValue(fields()[2], other.trip_distance)) {
this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance);
fieldSetFlags()[2] = other.fieldSetFlags()[2];
}
if (isValidValue(fields()[3], other.pu_location_id)) {
this.pu_location_id = data().deepCopy(fields()[3].schema(), other.pu_location_id);
fieldSetFlags()[3] = other.fieldSetFlags()[3];
}
}
/**
* Creates a Builder by copying an existing RideRecordCompatible instance
* @param other The existing instance to copy.
*/
private Builder(schemaregistry.RideRecordCompatible other) {
super(SCHEMA$, MODEL$);
if (isValidValue(fields()[0], other.vendorId)) {
this.vendorId = data().deepCopy(fields()[0].schema(), other.vendorId);
fieldSetFlags()[0] = true;
}
if (isValidValue(fields()[1], other.passenger_count)) {
this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count);
fieldSetFlags()[1] = true;
}
if (isValidValue(fields()[2], other.trip_distance)) {
this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance);
fieldSetFlags()[2] = true;
}
if (isValidValue(fields()[3], other.pu_location_id)) {
this.pu_location_id = data().deepCopy(fields()[3].schema(), other.pu_location_id);
fieldSetFlags()[3] = true;
}
}
/**
* Gets the value of the 'vendorId' field.
* @return The value.
*/
public java.lang.String getVendorId() {
return vendorId;
}
/**
* Sets the value of the 'vendorId' field.
* @param value The value of 'vendorId'.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder setVendorId(java.lang.String value) {
validate(fields()[0], value);
this.vendorId = value;
fieldSetFlags()[0] = true;
return this;
}
/**
* Checks whether the 'vendorId' field has been set.
* @return True if the 'vendorId' field has been set, false otherwise.
*/
public boolean hasVendorId() {
return fieldSetFlags()[0];
}
/**
* Clears the value of the 'vendorId' field.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder clearVendorId() {
vendorId = null;
fieldSetFlags()[0] = false;
return this;
}
/**
* Gets the value of the 'passenger_count' field.
* @return The value.
*/
public int getPassengerCount() {
return passenger_count;
}
/**
* Sets the value of the 'passenger_count' field.
* @param value The value of 'passenger_count'.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder setPassengerCount(int value) {
validate(fields()[1], value);
this.passenger_count = value;
fieldSetFlags()[1] = true;
return this;
}
/**
* Checks whether the 'passenger_count' field has been set.
* @return True if the 'passenger_count' field has been set, false otherwise.
*/
public boolean hasPassengerCount() {
return fieldSetFlags()[1];
}
/**
* Clears the value of the 'passenger_count' field.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder clearPassengerCount() {
fieldSetFlags()[1] = false;
return this;
}
/**
* Gets the value of the 'trip_distance' field.
* @return The value.
*/
public double getTripDistance() {
return trip_distance;
}
/**
* Sets the value of the 'trip_distance' field.
* @param value The value of 'trip_distance'.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder setTripDistance(double value) {
validate(fields()[2], value);
this.trip_distance = value;
fieldSetFlags()[2] = true;
return this;
}
/**
* Checks whether the 'trip_distance' field has been set.
* @return True if the 'trip_distance' field has been set, false otherwise.
*/
public boolean hasTripDistance() {
return fieldSetFlags()[2];
}
/**
* Clears the value of the 'trip_distance' field.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder clearTripDistance() {
fieldSetFlags()[2] = false;
return this;
}
/**
* Gets the value of the 'pu_location_id' field.
* @return The value.
*/
public java.lang.Long getPuLocationId() {
return pu_location_id;
}
/**
* Sets the value of the 'pu_location_id' field.
* @param value The value of 'pu_location_id'.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder setPuLocationId(java.lang.Long value) {
validate(fields()[3], value);
this.pu_location_id = value;
fieldSetFlags()[3] = true;
return this;
}
/**
* Checks whether the 'pu_location_id' field has been set.
* @return True if the 'pu_location_id' field has been set, false otherwise.
*/
public boolean hasPuLocationId() {
return fieldSetFlags()[3];
}
/**
* Clears the value of the 'pu_location_id' field.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder clearPuLocationId() {
pu_location_id = null;
fieldSetFlags()[3] = false;
return this;
}
@Override
@SuppressWarnings("unchecked")
public RideRecordCompatible build() {
try {
RideRecordCompatible record = new RideRecordCompatible();
record.vendorId = fieldSetFlags()[0] ? this.vendorId : (java.lang.String) defaultValue(fields()[0]);
record.passenger_count = fieldSetFlags()[1] ? this.passenger_count : (java.lang.Integer) defaultValue(fields()[1]);
record.trip_distance = fieldSetFlags()[2] ? this.trip_distance : (java.lang.Double) defaultValue(fields()[2]);
record.pu_location_id = fieldSetFlags()[3] ? this.pu_location_id : (java.lang.Long) defaultValue(fields()[3]);
return record;
} catch (org.apache.avro.AvroMissingFieldException e) {
throw e;
} catch (java.lang.Exception e) {
throw new org.apache.avro.AvroRuntimeException(e);
}
}
}
@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumWriter<RideRecordCompatible>
WRITER$ = (org.apache.avro.io.DatumWriter<RideRecordCompatible>)MODEL$.createDatumWriter(SCHEMA$);
@Override public void writeExternal(java.io.ObjectOutput out)
throws java.io.IOException {
WRITER$.write(this, SpecificData.getEncoder(out));
}
@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumReader<RideRecordCompatible>
READER$ = (org.apache.avro.io.DatumReader<RideRecordCompatible>)MODEL$.createDatumReader(SCHEMA$);
@Override public void readExternal(java.io.ObjectInput in)
throws java.io.IOException {
READER$.read(this, SpecificData.getDecoder(in));
}
@Override protected boolean hasCustomCoders() { return true; }
@Override public void customEncode(org.apache.avro.io.Encoder out)
throws java.io.IOException
{
out.writeString(this.vendorId);
out.writeInt(this.passenger_count);
out.writeDouble(this.trip_distance);
if (this.pu_location_id == null) {
out.writeIndex(0);
out.writeNull();
} else {
out.writeIndex(1);
out.writeLong(this.pu_location_id);
}
}
@Override public void customDecode(org.apache.avro.io.ResolvingDecoder in)
throws java.io.IOException
{
org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff();
if (fieldOrder == null) {
this.vendorId = in.readString();
this.passenger_count = in.readInt();
this.trip_distance = in.readDouble();
if (in.readIndex() != 1) {
in.readNull();
this.pu_location_id = null;
} else {
this.pu_location_id = in.readLong();
}
} else {
for (int i = 0; i < 4; i++) {
switch (fieldOrder[i].pos()) {
case 0:
this.vendorId = in.readString();
break;
case 1:
this.passenger_count = in.readInt();
break;
case 2:
this.trip_distance = in.readDouble();
break;
case 3:
if (in.readIndex() != 1) {
in.readNull();
this.pu_location_id = null;
} else {
this.pu_location_id = in.readLong();
}
break;
default:
throw new java.io.IOException("Corrupt ResolvingDecoder.");
}
}
}
}
}
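// ---------------------------------------------------------------------------
// Editor's sketch (not part of the generated file): pu_location_id is a
// ["null","long"] union with default null in SCHEMA$ above, so a record built
// without it is still valid. The class name is hypothetical, for illustration.
// ---------------------------------------------------------------------------
class RideRecordCompatibleExample {
    public static void main(String[] args) {
        RideRecordCompatible withoutLocation = RideRecordCompatible.newBuilder()
                .setVendorId("2")
                .setPassengerCount(1)
                .setTripDistance(3.5)
                .build();                        // pu_location_id falls back to null
        RideRecordCompatible withLocation = RideRecordCompatible.newBuilder()
                .setVendorId("2")
                .setPassengerCount(1)
                .setTripDistance(3.5)
                .setPuLocationId(186L)
                .build();
        System.out.println(withoutLocation.getPuLocationId() + " / " + withLocation.getPuLocationId());
    }
}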
| 30.317073 | 462 | 0.657858 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | /**
* Autogenerated by Avro
*
* DO NOT EDIT DIRECTLY
*/
package schemaregistry;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.specific.SpecificData;
import org.apache.avro.util.Utf8;
import org.apache.avro.message.BinaryMessageEncoder;
import org.apache.avro.message.BinaryMessageDecoder;
import org.apache.avro.message.SchemaStore;
@org.apache.avro.specific.AvroGenerated
public class RideRecordNoneCompatible extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
private static final long serialVersionUID = -4618980179396772493L;
public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"RideRecordNoneCompatible\",\"namespace\":\"schemaregistry\",\"fields\":[{\"name\":\"vendorId\",\"type\":\"int\"},{\"name\":\"passenger_count\",\"type\":\"int\"},{\"name\":\"trip_distance\",\"type\":\"double\"}]}");
public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
private static final SpecificData MODEL$ = new SpecificData();
private static final BinaryMessageEncoder<RideRecordNoneCompatible> ENCODER =
new BinaryMessageEncoder<>(MODEL$, SCHEMA$);
private static final BinaryMessageDecoder<RideRecordNoneCompatible> DECODER =
new BinaryMessageDecoder<>(MODEL$, SCHEMA$);
/**
* Return the BinaryMessageEncoder instance used by this class.
* @return the message encoder used by this class
*/
public static BinaryMessageEncoder<RideRecordNoneCompatible> getEncoder() {
return ENCODER;
}
/**
* Return the BinaryMessageDecoder instance used by this class.
* @return the message decoder used by this class
*/
public static BinaryMessageDecoder<RideRecordNoneCompatible> getDecoder() {
return DECODER;
}
/**
* Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}.
* @param resolver a {@link SchemaStore} used to find schemas by fingerprint
* @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore
*/
public static BinaryMessageDecoder<RideRecordNoneCompatible> createDecoder(SchemaStore resolver) {
return new BinaryMessageDecoder<>(MODEL$, SCHEMA$, resolver);
}
/**
* Serializes this RideRecordNoneCompatible to a ByteBuffer.
* @return a buffer holding the serialized data for this instance
* @throws java.io.IOException if this instance could not be serialized
*/
public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException {
return ENCODER.encode(this);
}
/**
* Deserializes a RideRecordNoneCompatible from a ByteBuffer.
* @param b a byte buffer holding serialized data for an instance of this class
* @return a RideRecordNoneCompatible instance decoded from the given buffer
* @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class
*/
public static RideRecordNoneCompatible fromByteBuffer(
java.nio.ByteBuffer b) throws java.io.IOException {
return DECODER.decode(b);
}
private int vendorId;
private int passenger_count;
private double trip_distance;
/**
* Default constructor. Note that this does not initialize fields
* to their default values from the schema. If that is desired then
* one should use <code>newBuilder()</code>.
*/
public RideRecordNoneCompatible() {}
/**
* All-args constructor.
* @param vendorId The new value for vendorId
* @param passenger_count The new value for passenger_count
* @param trip_distance The new value for trip_distance
*/
public RideRecordNoneCompatible(java.lang.Integer vendorId, java.lang.Integer passenger_count, java.lang.Double trip_distance) {
this.vendorId = vendorId;
this.passenger_count = passenger_count;
this.trip_distance = trip_distance;
}
@Override
public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; }
@Override
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
// Used by DatumWriter. Applications should not call.
@Override
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return vendorId;
case 1: return passenger_count;
case 2: return trip_distance;
default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
}
}
// Used by DatumReader. Applications should not call.
@Override
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: vendorId = (java.lang.Integer)value$; break;
case 1: passenger_count = (java.lang.Integer)value$; break;
case 2: trip_distance = (java.lang.Double)value$; break;
default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
}
}
/**
* Gets the value of the 'vendorId' field.
* @return The value of the 'vendorId' field.
*/
public int getVendorId() {
return vendorId;
}
/**
* Sets the value of the 'vendorId' field.
* @param value the value to set.
*/
public void setVendorId(int value) {
this.vendorId = value;
}
/**
* Gets the value of the 'passenger_count' field.
* @return The value of the 'passenger_count' field.
*/
public int getPassengerCount() {
return passenger_count;
}
/**
* Sets the value of the 'passenger_count' field.
* @param value the value to set.
*/
public void setPassengerCount(int value) {
this.passenger_count = value;
}
/**
* Gets the value of the 'trip_distance' field.
* @return The value of the 'trip_distance' field.
*/
public double getTripDistance() {
return trip_distance;
}
/**
* Sets the value of the 'trip_distance' field.
* @param value the value to set.
*/
public void setTripDistance(double value) {
this.trip_distance = value;
}
/**
* Creates a new RideRecordNoneCompatible RecordBuilder.
* @return A new RideRecordNoneCompatible RecordBuilder
*/
public static schemaregistry.RideRecordNoneCompatible.Builder newBuilder() {
return new schemaregistry.RideRecordNoneCompatible.Builder();
}
/**
* Creates a new RideRecordNoneCompatible RecordBuilder by copying an existing Builder.
* @param other The existing builder to copy.
* @return A new RideRecordNoneCompatible RecordBuilder
*/
public static schemaregistry.RideRecordNoneCompatible.Builder newBuilder(schemaregistry.RideRecordNoneCompatible.Builder other) {
if (other == null) {
return new schemaregistry.RideRecordNoneCompatible.Builder();
} else {
return new schemaregistry.RideRecordNoneCompatible.Builder(other);
}
}
/**
* Creates a new RideRecordNoneCompatible RecordBuilder by copying an existing RideRecordNoneCompatible instance.
* @param other The existing instance to copy.
* @return A new RideRecordNoneCompatible RecordBuilder
*/
public static schemaregistry.RideRecordNoneCompatible.Builder newBuilder(schemaregistry.RideRecordNoneCompatible other) {
if (other == null) {
return new schemaregistry.RideRecordNoneCompatible.Builder();
} else {
return new schemaregistry.RideRecordNoneCompatible.Builder(other);
}
}
/**
* RecordBuilder for RideRecordNoneCompatible instances.
*/
@org.apache.avro.specific.AvroGenerated
public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<RideRecordNoneCompatible>
implements org.apache.avro.data.RecordBuilder<RideRecordNoneCompatible> {
private int vendorId;
private int passenger_count;
private double trip_distance;
/** Creates a new Builder */
private Builder() {
super(SCHEMA$, MODEL$);
}
/**
* Creates a Builder by copying an existing Builder.
* @param other The existing Builder to copy.
*/
private Builder(schemaregistry.RideRecordNoneCompatible.Builder other) {
super(other);
if (isValidValue(fields()[0], other.vendorId)) {
this.vendorId = data().deepCopy(fields()[0].schema(), other.vendorId);
fieldSetFlags()[0] = other.fieldSetFlags()[0];
}
if (isValidValue(fields()[1], other.passenger_count)) {
this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count);
fieldSetFlags()[1] = other.fieldSetFlags()[1];
}
if (isValidValue(fields()[2], other.trip_distance)) {
this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance);
fieldSetFlags()[2] = other.fieldSetFlags()[2];
}
}
/**
* Creates a Builder by copying an existing RideRecordNoneCompatible instance
* @param other The existing instance to copy.
*/
private Builder(schemaregistry.RideRecordNoneCompatible other) {
super(SCHEMA$, MODEL$);
if (isValidValue(fields()[0], other.vendorId)) {
this.vendorId = data().deepCopy(fields()[0].schema(), other.vendorId);
fieldSetFlags()[0] = true;
}
if (isValidValue(fields()[1], other.passenger_count)) {
this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count);
fieldSetFlags()[1] = true;
}
if (isValidValue(fields()[2], other.trip_distance)) {
this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance);
fieldSetFlags()[2] = true;
}
}
/**
* Gets the value of the 'vendorId' field.
* @return The value.
*/
public int getVendorId() {
return vendorId;
}
/**
* Sets the value of the 'vendorId' field.
* @param value The value of 'vendorId'.
* @return This builder.
*/
public schemaregistry.RideRecordNoneCompatible.Builder setVendorId(int value) {
validate(fields()[0], value);
this.vendorId = value;
fieldSetFlags()[0] = true;
return this;
}
/**
* Checks whether the 'vendorId' field has been set.
* @return True if the 'vendorId' field has been set, false otherwise.
*/
public boolean hasVendorId() {
return fieldSetFlags()[0];
}
/**
* Clears the value of the 'vendorId' field.
* @return This builder.
*/
public schemaregistry.RideRecordNoneCompatible.Builder clearVendorId() {
fieldSetFlags()[0] = false;
return this;
}
/**
* Gets the value of the 'passenger_count' field.
* @return The value.
*/
public int getPassengerCount() {
return passenger_count;
}
/**
* Sets the value of the 'passenger_count' field.
* @param value The value of 'passenger_count'.
* @return This builder.
*/
public schemaregistry.RideRecordNoneCompatible.Builder setPassengerCount(int value) {
validate(fields()[1], value);
this.passenger_count = value;
fieldSetFlags()[1] = true;
return this;
}
/**
* Checks whether the 'passenger_count' field has been set.
* @return True if the 'passenger_count' field has been set, false otherwise.
*/
public boolean hasPassengerCount() {
return fieldSetFlags()[1];
}
/**
* Clears the value of the 'passenger_count' field.
* @return This builder.
*/
public schemaregistry.RideRecordNoneCompatible.Builder clearPassengerCount() {
fieldSetFlags()[1] = false;
return this;
}
/**
* Gets the value of the 'trip_distance' field.
* @return The value.
*/
public double getTripDistance() {
return trip_distance;
}
/**
* Sets the value of the 'trip_distance' field.
* @param value The value of 'trip_distance'.
* @return This builder.
*/
public schemaregistry.RideRecordNoneCompatible.Builder setTripDistance(double value) {
validate(fields()[2], value);
this.trip_distance = value;
fieldSetFlags()[2] = true;
return this;
}
/**
* Checks whether the 'trip_distance' field has been set.
* @return True if the 'trip_distance' field has been set, false otherwise.
*/
public boolean hasTripDistance() {
return fieldSetFlags()[2];
}
/**
* Clears the value of the 'trip_distance' field.
* @return This builder.
*/
public schemaregistry.RideRecordNoneCompatible.Builder clearTripDistance() {
fieldSetFlags()[2] = false;
return this;
}
@Override
@SuppressWarnings("unchecked")
public RideRecordNoneCompatible build() {
try {
RideRecordNoneCompatible record = new RideRecordNoneCompatible();
record.vendorId = fieldSetFlags()[0] ? this.vendorId : (java.lang.Integer) defaultValue(fields()[0]);
record.passenger_count = fieldSetFlags()[1] ? this.passenger_count : (java.lang.Integer) defaultValue(fields()[1]);
record.trip_distance = fieldSetFlags()[2] ? this.trip_distance : (java.lang.Double) defaultValue(fields()[2]);
return record;
} catch (org.apache.avro.AvroMissingFieldException e) {
throw e;
} catch (java.lang.Exception e) {
throw new org.apache.avro.AvroRuntimeException(e);
}
}
}
@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumWriter<RideRecordNoneCompatible>
WRITER$ = (org.apache.avro.io.DatumWriter<RideRecordNoneCompatible>)MODEL$.createDatumWriter(SCHEMA$);
@Override public void writeExternal(java.io.ObjectOutput out)
throws java.io.IOException {
WRITER$.write(this, SpecificData.getEncoder(out));
}
@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumReader<RideRecordNoneCompatible>
READER$ = (org.apache.avro.io.DatumReader<RideRecordNoneCompatible>)MODEL$.createDatumReader(SCHEMA$);
@Override public void readExternal(java.io.ObjectInput in)
throws java.io.IOException {
READER$.read(this, SpecificData.getDecoder(in));
}
@Override protected boolean hasCustomCoders() { return true; }
@Override public void customEncode(org.apache.avro.io.Encoder out)
throws java.io.IOException
{
out.writeInt(this.vendorId);
out.writeInt(this.passenger_count);
out.writeDouble(this.trip_distance);
}
@Override public void customDecode(org.apache.avro.io.ResolvingDecoder in)
throws java.io.IOException
{
org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff();
if (fieldOrder == null) {
this.vendorId = in.readInt();
this.passenger_count = in.readInt();
this.trip_distance = in.readDouble();
} else {
for (int i = 0; i < 3; i++) {
switch (fieldOrder[i].pos()) {
case 0:
this.vendorId = in.readInt();
break;
case 1:
this.passenger_count = in.readInt();
break;
case 2:
this.trip_distance = in.readDouble();
break;
default:
throw new java.io.IOException("Corrupt ResolvingDecoder.");
}
}
}
}
}
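// ---------------------------------------------------------------------------
// Editor's note: unlike RideRecordCompatible above, this schema declares
// vendorId as an int (see SCHEMA$), so it is not a compatible evolution of the
// string-based ride schemas. It presumably exists to demonstrate a Schema
// Registry compatibility-check failure rather than for production use.
// ---------------------------------------------------------------------------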
| 30.607966 | 344 | 0.675975 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import com.opencsv.CSVReader;
import com.opencsv.exceptions.CsvException;
import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
import io.confluent.kafka.serializers.KafkaAvroSerializer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.streams.StreamsConfig;
import schemaregistry.RideRecord;
import java.io.FileReader;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
public class AvroProducer {
private Properties props = new Properties();
public AvroProducer() {
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class.getName());
props.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "https://psrc-kk5gg.europe-west3.gcp.confluent.cloud");
props.put("basic.auth.credentials.source", "USER_INFO");
props.put("basic.auth.user.info", Secrets.SCHEMA_REGISTRY_KEY+":"+Secrets.SCHEMA_REGISTRY_SECRET);
}
public List<RideRecord> getRides() throws IOException, CsvException {
var ridesStream = this.getClass().getResource("/rides.csv");
var reader = new CSVReader(new FileReader(ridesStream.getFile()));
reader.skip(1);
return reader.readAll().stream().map(row ->
RideRecord.newBuilder()
.setVendorId(row[0])
.setTripDistance(Double.parseDouble(row[4]))
.setPassengerCount(Integer.parseInt(row[3]))
.build()
).collect(Collectors.toList());
}
public void publishRides(List<RideRecord> rides) throws ExecutionException, InterruptedException {
KafkaProducer<String, RideRecord> kafkaProducer = new KafkaProducer<>(props);
for (RideRecord ride : rides) {
var record = kafkaProducer.send(new ProducerRecord<>("rides_avro", String.valueOf(ride.getVendorId()), ride), (metadata, exception) -> {
if (exception != null) {
System.out.println(exception.getMessage());
}
});
System.out.println(record.get().offset());
Thread.sleep(500);
}
}
public static void main(String[] args) throws IOException, CsvException, ExecutionException, InterruptedException {
var producer = new AvroProducer();
var rideRecords = producer.getRides();
producer.publishRides(rideRecords);
}
}
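// ---------------------------------------------------------------------------
// Editor's note: with the default serializer settings (TopicNameStrategy,
// auto-registration enabled), KafkaAvroSerializer registers or looks up the
// RideRecord schema under the subject "rides_avro-value" in the configured
// Schema Registry before the first send; later schema versions are
// compatibility-checked against that subject.
// ---------------------------------------------------------------------------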
| 44.767123 | 192 | 0.688323 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.example.data.Ride;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.List;
import java.util.Properties;
import io.confluent.kafka.serializers.KafkaJsonDeserializerConfig;
public class JsonConsumer {
private Properties props = new Properties();
private KafkaConsumer<String, Ride> consumer;
public JsonConsumer() {
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "io.confluent.kafka.serializers.KafkaJsonDeserializer");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "kafka_tutorial_example.jsonconsumer.v2");
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
props.put(KafkaJsonDeserializerConfig.JSON_VALUE_TYPE, Ride.class);
consumer = new KafkaConsumer<String, Ride>(props);
consumer.subscribe(List.of("rides"));
}
public void consumeFromKafka() {
System.out.println("Consuming form kafka started");
var results = consumer.poll(Duration.of(1, ChronoUnit.SECONDS));
var i = 0;
do {
for(ConsumerRecord<String, Ride> result: results) {
System.out.println(result.value().DOLocationID);
}
results = consumer.poll(Duration.of(1, ChronoUnit.SECONDS));
System.out.println("RESULTS:::" + results.count());
i++;
}
while(!results.isEmpty() || i < 10);
}
public static void main(String[] args) {
JsonConsumer jsonConsumer = new JsonConsumer();
jsonConsumer.consumeFromKafka();
}
}
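// ---------------------------------------------------------------------------
// Editor's note: the JSON_VALUE_TYPE property above is what makes the
// deserializer return Ride instances instead of generic maps. The poll loop
// runs at least ten iterations and then keeps going until a poll comes back
// empty; the consumer is never closed, which is acceptable for this demo but
// would leak the underlying network client in a long-lived application.
// ---------------------------------------------------------------------------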
| 42.631579 | 192 | 0.697104 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;
import org.example.customserdes.CustomSerdes;
import org.example.data.Ride;
import java.util.Properties;
public class JsonKStream {
private Properties props = new Properties();
public JsonKStream() {
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka_tutorial.kstream.count.plocation.v1");
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
}
public Topology createTopology() {
StreamsBuilder streamsBuilder = new StreamsBuilder();
var ridesStream = streamsBuilder.stream("rides", Consumed.with(Serdes.String(), CustomSerdes.getSerde(Ride.class)));
var puLocationCount = ridesStream.groupByKey().count().toStream();
puLocationCount.to("rides-pulocation-count", Produced.with(Serdes.String(), Serdes.Long()));
return streamsBuilder.build();
}
public void countPLocation() throws InterruptedException {
var topology = createTopology();
var kStreams = new KafkaStreams(topology, props);
kStreams.start();
while (kStreams.state() != KafkaStreams.State.RUNNING) {
System.out.println(kStreams.state());
Thread.sleep(1000);
}
System.out.println(kStreams.state());
Runtime.getRuntime().addShutdownHook(new Thread(kStreams::close));
}
public static void main(String[] args) throws InterruptedException {
var object = new JsonKStream();
object.countPLocation();
}
}
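// ---------------------------------------------------------------------------
// Editor's note: groupByKey().count() counts records per message key. The
// JsonProducer in this module keys each ride by DOLocationID, so despite the
// "pulocation" naming, the counts written to "rides-pulocation-count" are per
// drop-off location key; re-keying with selectKey or groupBy on PULocationID
// would be needed to count pickups instead.
// ---------------------------------------------------------------------------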
| 42.175439 | 192 | 0.707724 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler;
import org.apache.kafka.streams.kstream.*;
import org.example.customserdes.CustomSerdes;
import org.example.data.PickupLocation;
import org.example.data.Ride;
import org.example.data.VendorInfo;
import java.time.Duration;
import java.util.Optional;
import java.util.Properties;
public class JsonKStreamJoins {
private Properties props = new Properties();
public JsonKStreamJoins() {
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka_tutorial.kstream.joined.rides.pickuplocation.v1");
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
}
public Topology createTopology() {
StreamsBuilder streamsBuilder = new StreamsBuilder();
KStream<String, Ride> rides = streamsBuilder.stream(Topics.INPUT_RIDE_TOPIC, Consumed.with(Serdes.String(), CustomSerdes.getSerde(Ride.class)));
KStream<String, PickupLocation> pickupLocations = streamsBuilder.stream(Topics.INPUT_RIDE_LOCATION_TOPIC, Consumed.with(Serdes.String(), CustomSerdes.getSerde(PickupLocation.class)));
var pickupLocationsKeyedOnPUId = pickupLocations.selectKey((key, value) -> String.valueOf(value.PULocationID));
var joined = rides.join(pickupLocationsKeyedOnPUId, (ValueJoiner<Ride, PickupLocation, Optional<VendorInfo>>) (ride, pickupLocation) -> {
var period = Duration.between(ride.tpep_dropoff_datetime, pickupLocation.tpep_pickup_datetime);
if (period.abs().toMinutes() > 10) return Optional.empty();
else return Optional.of(new VendorInfo(ride.VendorID, pickupLocation.PULocationID, pickupLocation.tpep_pickup_datetime, ride.tpep_dropoff_datetime));
}, JoinWindows.ofTimeDifferenceAndGrace(Duration.ofMinutes(20), Duration.ofMinutes(5)),
StreamJoined.with(Serdes.String(), CustomSerdes.getSerde(Ride.class), CustomSerdes.getSerde(PickupLocation.class)));
joined.filter(((key, value) -> value.isPresent())).mapValues(Optional::get)
.to(Topics.OUTPUT_TOPIC, Produced.with(Serdes.String(), CustomSerdes.getSerde(VendorInfo.class)));
return streamsBuilder.build();
}
public void joinRidesPickupLocation() throws InterruptedException {
var topology = createTopology();
var kStreams = new KafkaStreams(topology, props);
kStreams.setUncaughtExceptionHandler(exception -> {
System.out.println(exception.getMessage());
return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_APPLICATION;
});
kStreams.start();
while (kStreams.state() != KafkaStreams.State.RUNNING) {
System.out.println(kStreams.state());
Thread.sleep(1000);
}
System.out.println(kStreams.state());
Runtime.getRuntime().addShutdownHook(new Thread(kStreams::close));
}
public static void main(String[] args) throws InterruptedException {
var object = new JsonKStreamJoins();
object.joinRidesPickupLocation();
}
}
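// ---------------------------------------------------------------------------
// Editor's note: this is a stream-stream join, so a ride (keyed upstream by
// DOLocationID) and a pickup location (re-keyed on PULocationID above) only
// join when their keys match and their timestamps fall within the 20-minute
// JoinWindows plus the 5-minute grace period. The ValueJoiner additionally
// drops pairs whose pickup/drop-off times are more than 10 minutes apart, and
// the Optional-based filter keeps only the surviving VendorInfo values.
// ---------------------------------------------------------------------------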
| 50.922078 | 192 | 0.718039 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.WindowedSerdes;
import org.example.customserdes.CustomSerdes;
import org.example.data.Ride;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Properties;
public class JsonKStreamWindow {
private Properties props = new Properties();
public JsonKStreamWindow() {
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka_tutorial.kstream.count.plocation.v1");
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
}
public Topology createTopology() {
StreamsBuilder streamsBuilder = new StreamsBuilder();
var ridesStream = streamsBuilder.stream("rides", Consumed.with(Serdes.String(), CustomSerdes.getSerde(Ride.class)));
var puLocationCount = ridesStream.groupByKey()
.windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofSeconds(10), Duration.ofSeconds(5)))
.count().toStream();
var windowSerde = WindowedSerdes.timeWindowedSerdeFrom(String.class, 10*1000);
puLocationCount.to("rides-pulocation-window-count", Produced.with(windowSerde, Serdes.Long()));
return streamsBuilder.build();
}
public void countPLocationWindowed() {
var topology = createTopology();
var kStreams = new KafkaStreams(topology, props);
kStreams.start();
Runtime.getRuntime().addShutdownHook(new Thread(kStreams::close));
}
public static void main(String[] args) {
var object = new JsonKStreamWindow();
object.countPLocationWindowed();
}
}
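// ---------------------------------------------------------------------------
// Editor's note: counts are emitted per 10-second tumbling window with a
// 5-second grace period for late records, and the 10*1000 passed to
// timeWindowedSerdeFrom should match that window size so the windowed keys can
// be read back correctly. Also note the application.id is identical to
// JsonKStream's; giving each topology its own id avoids sharing consumer-group
// offsets and local state between the two programs.
// ---------------------------------------------------------------------------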
| 41.983607 | 192 | 0.724151 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import com.opencsv.CSVReader;
import com.opencsv.exceptions.CsvException;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.streams.StreamsConfig;
import org.example.data.Ride;
import java.io.FileReader;
import java.io.IOException;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
public class JsonProducer {
private Properties props = new Properties();
public JsonProducer() {
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "io.confluent.kafka.serializers.KafkaJsonSerializer");
}
public List<Ride> getRides() throws IOException, CsvException {
var ridesStream = this.getClass().getResource("/rides.csv");
var reader = new CSVReader(new FileReader(ridesStream.getFile()));
reader.skip(1);
return reader.readAll().stream().map(arr -> new Ride(arr))
.collect(Collectors.toList());
}
public void publishRides(List<Ride> rides) throws ExecutionException, InterruptedException {
KafkaProducer<String, Ride> kafkaProducer = new KafkaProducer<String, Ride>(props);
for(Ride ride: rides) {
ride.tpep_pickup_datetime = LocalDateTime.now().minusMinutes(20);
ride.tpep_dropoff_datetime = LocalDateTime.now();
var record = kafkaProducer.send(new ProducerRecord<>("rides", String.valueOf(ride.DOLocationID), ride), (metadata, exception) -> {
if(exception != null) {
System.out.println(exception.getMessage());
}
});
System.out.println(record.get().offset());
System.out.println(ride.DOLocationID);
Thread.sleep(500);
}
}
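    // -----------------------------------------------------------------------
    // Editor's note: publishRides overwrites each ride's pickup/drop-off
    // timestamps with "now", which keeps the records inside the join window
    // used by JsonKStreamJoins (20-minute window, 10-minute cut-off in its
    // ValueJoiner). The producer is never closed or flushed here, which is
    // acceptable for this demo since every send is awaited via record.get().
    // -----------------------------------------------------------------------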
public static void main(String[] args) throws IOException, CsvException, ExecutionException, InterruptedException {
var producer = new JsonProducer();
var rides = producer.getRides();
producer.publishRides(rides);
}
}
 | 44.278689 | 192 | 0.682361 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import com.opencsv.exceptions.CsvException;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.example.data.PickupLocation;
import java.io.IOException;
import java.time.LocalDateTime;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
public class JsonProducerPickupLocation {
private Properties props = new Properties();
public JsonProducerPickupLocation() {
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "io.confluent.kafka.serializers.KafkaJsonSerializer");
}
public void publish(PickupLocation pickupLocation) throws ExecutionException, InterruptedException {
KafkaProducer<String, PickupLocation> kafkaProducer = new KafkaProducer<String, PickupLocation>(props);
var record = kafkaProducer.send(new ProducerRecord<>("rides_location", String.valueOf(pickupLocation.PULocationID), pickupLocation), (metadata, exception) -> {
if (exception != null) {
System.out.println(exception.getMessage());
}
});
System.out.println(record.get().offset());
}
public static void main(String[] args) throws IOException, CsvException, ExecutionException, InterruptedException {
var producer = new JsonProducerPickupLocation();
producer.publish(new PickupLocation(186, LocalDateTime.now()));
}
}
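// ---------------------------------------------------------------------------
// Editor's note: publish() creates a new KafkaProducer per call and blocks on
// record.get() for the acknowledgement. It is intended to be run alongside
// JsonProducer so the "rides_location" record (PULocationID 186 here) can join
// against a freshly produced ride with the matching key in JsonKStreamJoins.
// ---------------------------------------------------------------------------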
| 47.577778 | 192 | 0.730892 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
public class Secrets {
public static final String KAFKA_CLUSTER_KEY = "REPLACE_WITH_YOUR_KAFKA_CLUSTER_KEY";
public static final String KAFKA_CLUSTER_SECRET = "REPLACE_WITH_YOUR_KAFKA_CLUSTER_SECRET";
public static final String SCHEMA_REGISTRY_KEY = "REPLACE_WITH_SCHEMA_REGISTRY_KEY";
public static final String SCHEMA_REGISTRY_SECRET = "REPLACE_WITH_SCHEMA_REGISTRY_SECRET";
}
| 37.181818 | 95 | 0.761337 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
public class Topics {
public static final String INPUT_RIDE_TOPIC = "rides";
public static final String INPUT_RIDE_LOCATION_TOPIC = "rides_location";
public static final String OUTPUT_TOPIC = "vendor_info";
}
| 29.5 | 76 | 0.73251 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example.customserdes;
import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
import io.confluent.kafka.serializers.KafkaJsonDeserializer;
import io.confluent.kafka.serializers.KafkaJsonSerializer;
import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde;
import org.apache.avro.specific.SpecificRecordBase;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.Serializer;
import org.example.data.PickupLocation;
import org.example.data.Ride;
import org.example.data.VendorInfo;
import java.util.HashMap;
import java.util.Map;
public class CustomSerdes {
public static <T> Serde<T> getSerde(Class<T> classOf) {
Map<String, Object> serdeProps = new HashMap<>();
serdeProps.put("json.value.type", classOf);
final Serializer<T> mySerializer = new KafkaJsonSerializer<>();
mySerializer.configure(serdeProps, false);
final Deserializer<T> myDeserializer = new KafkaJsonDeserializer<>();
myDeserializer.configure(serdeProps, false);
return Serdes.serdeFrom(mySerializer, myDeserializer);
}
public static <T extends SpecificRecordBase> SpecificAvroSerde getAvroSerde(boolean isKey, String schemaRegistryUrl) {
var serde = new SpecificAvroSerde<T>();
Map<String, Object> serdeProps = new HashMap<>();
serdeProps.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, schemaRegistryUrl);
serde.configure(serdeProps, isKey);
return serde;
}
}
| 37.325581 | 122 | 0.763206 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example.data;
import java.time.LocalDateTime;
public class PickupLocation {
public PickupLocation(long PULocationID, LocalDateTime tpep_pickup_datetime) {
this.PULocationID = PULocationID;
this.tpep_pickup_datetime = tpep_pickup_datetime;
}
public PickupLocation() {
}
public long PULocationID;
public LocalDateTime tpep_pickup_datetime;
}
| 22.352941 | 82 | 0.724747 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example.data;
import java.nio.DoubleBuffer;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
public class Ride {
public Ride(String[] arr) {
VendorID = arr[0];
tpep_pickup_datetime = LocalDateTime.parse(arr[1], DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
tpep_dropoff_datetime = LocalDateTime.parse(arr[2], DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
passenger_count = Integer.parseInt(arr[3]);
trip_distance = Double.parseDouble(arr[4]);
RatecodeID = Long.parseLong(arr[5]);
store_and_fwd_flag = arr[6];
PULocationID = Long.parseLong(arr[7]);
DOLocationID = Long.parseLong(arr[8]);
payment_type = arr[9];
fare_amount = Double.parseDouble(arr[10]);
extra = Double.parseDouble(arr[11]);
mta_tax = Double.parseDouble(arr[12]);
tip_amount = Double.parseDouble(arr[13]);
tolls_amount = Double.parseDouble(arr[14]);
improvement_surcharge = Double.parseDouble(arr[15]);
total_amount = Double.parseDouble(arr[16]);
congestion_surcharge = Double.parseDouble(arr[17]);
}
public Ride(){}
public String VendorID;
public LocalDateTime tpep_pickup_datetime;
public LocalDateTime tpep_dropoff_datetime;
public int passenger_count;
public double trip_distance;
public long RatecodeID;
public String store_and_fwd_flag;
public long PULocationID;
public long DOLocationID;
public String payment_type;
public double fare_amount;
public double extra;
public double mta_tax;
public double tip_amount;
public double tolls_amount;
public double improvement_surcharge;
public double total_amount;
public double congestion_surcharge;
}
| 35.56 | 112 | 0.681445 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example.data;
import java.time.LocalDateTime;
public class VendorInfo {
public VendorInfo(String vendorID, long PULocationID, LocalDateTime pickupTime, LocalDateTime lastDropoffTime) {
VendorID = vendorID;
this.PULocationID = PULocationID;
this.pickupTime = pickupTime;
this.lastDropoffTime = lastDropoffTime;
}
public VendorInfo() {
}
public String VendorID;
public long PULocationID;
public LocalDateTime pickupTime;
public LocalDateTime lastDropoffTime;
}
| 23.590909 | 116 | 0.72037 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.internals.Topic;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.*;
import org.example.customserdes.CustomSerdes;
import org.example.data.PickupLocation;
import org.example.data.Ride;
import org.example.data.VendorInfo;
import org.example.helper.DataGeneratorHelper;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import javax.xml.crypto.Data;
import java.util.Properties;
import static org.junit.jupiter.api.Assertions.*;
class JsonKStreamJoinsTest {
private Properties props = new Properties();
private static TopologyTestDriver testDriver;
private TestInputTopic<String, Ride> ridesTopic;
private TestInputTopic<String, PickupLocation> pickLocationTopic;
private TestOutputTopic<String, VendorInfo> outputTopic;
private Topology topology = new JsonKStreamJoins().createTopology();
@BeforeEach
public void setup() {
props = new Properties();
props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "testing_count_application");
props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
if (testDriver != null) {
testDriver.close();
}
testDriver = new TopologyTestDriver(topology, props);
ridesTopic = testDriver.createInputTopic(Topics.INPUT_RIDE_TOPIC, Serdes.String().serializer(), CustomSerdes.getSerde(Ride.class).serializer());
pickLocationTopic = testDriver.createInputTopic(Topics.INPUT_RIDE_LOCATION_TOPIC, Serdes.String().serializer(), CustomSerdes.getSerde(PickupLocation.class).serializer());
outputTopic = testDriver.createOutputTopic(Topics.OUTPUT_TOPIC, Serdes.String().deserializer(), CustomSerdes.getSerde(VendorInfo.class).deserializer());
}
@Test
public void testIfJoinWorksOnSameDropOffPickupLocationId() {
Ride ride = DataGeneratorHelper.generateRide();
PickupLocation pickupLocation = DataGeneratorHelper.generatePickUpLocation(ride.DOLocationID);
ridesTopic.pipeInput(String.valueOf(ride.DOLocationID), ride);
pickLocationTopic.pipeInput(String.valueOf(pickupLocation.PULocationID), pickupLocation);
assertEquals(outputTopic.getQueueSize(), 1);
var expected = new VendorInfo(ride.VendorID, pickupLocation.PULocationID, pickupLocation.tpep_pickup_datetime, ride.tpep_dropoff_datetime);
var result = outputTopic.readKeyValue();
assertEquals(result.key, String.valueOf(ride.DOLocationID));
assertEquals(result.value.VendorID, expected.VendorID);
assertEquals(result.value.pickupTime, expected.pickupTime);
}
@AfterAll
public static void shutdown() {
testDriver.close();
}
} | 44.460317 | 178 | 0.754803 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.*;
import org.example.customserdes.CustomSerdes;
import org.example.data.Ride;
import org.example.helper.DataGeneratorHelper;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
import java.util.Properties;
class JsonKStreamTest {
private Properties props;
private static TopologyTestDriver testDriver;
private TestInputTopic<String, Ride> inputTopic;
private TestOutputTopic<String, Long> outputTopic;
private Topology topology = new JsonKStream().createTopology();
@BeforeEach
public void setup() {
props = new Properties();
props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "testing_count_application");
props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
if (testDriver != null) {
testDriver.close();
}
testDriver = new TopologyTestDriver(topology, props);
inputTopic = testDriver.createInputTopic("rides", Serdes.String().serializer(), CustomSerdes.getSerde(Ride.class).serializer());
outputTopic = testDriver.createOutputTopic("rides-pulocation-count", Serdes.String().deserializer(), Serdes.Long().deserializer());
}
@Test
public void testIfOneMessageIsPassedToInputTopicWeGetCountOfOne() {
Ride ride = DataGeneratorHelper.generateRide();
inputTopic.pipeInput(String.valueOf(ride.DOLocationID), ride);
assertEquals(outputTopic.readKeyValue(), KeyValue.pair(String.valueOf(ride.DOLocationID), 1L));
assertTrue(outputTopic.isEmpty());
}
@Test
public void testIfTwoMessageArePassedWithDifferentKey() {
Ride ride1 = DataGeneratorHelper.generateRide();
ride1.DOLocationID = 100L;
inputTopic.pipeInput(String.valueOf(ride1.DOLocationID), ride1);
Ride ride2 = DataGeneratorHelper.generateRide();
ride2.DOLocationID = 200L;
inputTopic.pipeInput(String.valueOf(ride2.DOLocationID), ride2);
assertEquals(outputTopic.readKeyValue(), KeyValue.pair(String.valueOf(ride1.DOLocationID), 1L));
assertEquals(outputTopic.readKeyValue(), KeyValue.pair(String.valueOf(ride2.DOLocationID), 1L));
assertTrue(outputTopic.isEmpty());
}
@Test
public void testIfTwoMessageArePassedWithSameKey() {
Ride ride1 = DataGeneratorHelper.generateRide();
ride1.DOLocationID = 100L;
inputTopic.pipeInput(String.valueOf(ride1.DOLocationID), ride1);
Ride ride2 = DataGeneratorHelper.generateRide();
ride2.DOLocationID = 100L;
inputTopic.pipeInput(String.valueOf(ride2.DOLocationID), ride2);
assertEquals(outputTopic.readKeyValue(), KeyValue.pair("100", 1L));
assertEquals(outputTopic.readKeyValue(), KeyValue.pair("100", 2L));
assertTrue(outputTopic.isEmpty());
}
@AfterAll
public static void tearDown() {
testDriver.close();
}
} | 37.7375 | 139 | 0.715623 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example.helper;
import org.example.data.PickupLocation;
import org.example.data.Ride;
import org.example.data.VendorInfo;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.List;
public class DataGeneratorHelper {
public static Ride generateRide() {
var arrivalTime = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
var departureTime = LocalDateTime.now().minusMinutes(30).format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
return new Ride(new String[]{"1", departureTime, arrivalTime,"1","1.50","1","N","238","75","2","8","0.5","0.5","0","0","0.3","9.3","0"});
}
public static PickupLocation generatePickUpLocation(long pickupLocationId) {
return new PickupLocation(pickupLocationId, LocalDateTime.now());
}
}
| 38 | 145 | 0.715286 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators import LoadDimensionOperator
from helpers import SqlQueries
def load_dimension_subdag(
parent_dag_name,
task_id,
redshift_conn_id,
sql_statement,
delete_load,
table_name,
*args, **kwargs):
dag = DAG(f"{parent_dag_name}.{task_id}", **kwargs)
load_dimension_table = LoadDimensionOperator(
task_id=task_id,
dag=dag,
redshift_conn_id=redshift_conn_id,
sql_query = sql_statement,
delete_load = delete_load,
table_name = table_name,
)
load_dimension_table
return dag | 23.333333 | 58 | 0.657064 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators import ( CreateTableOperator, StageToRedshiftOperator, LoadFactOperator,
LoadDimensionOperator, DataQualityOperator)
from helpers import SqlQueries
from sparkify_dimension_subdag import load_dimension_subdag
from airflow.operators.subdag_operator import SubDagOperator
#AWS_KEY = os.environ.get('AWS_KEY')
#AWS_SECRET = os.environ.get('AWS_SECRET')
s3_bucket = 'udacity-dend-warehouse'
song_s3_key = "song_data"
log_s3_key = "log-data"
log_json_file = "log_json_path.json"
default_args = {
'owner': 'udacity',
'depends_on_past': True,
'start_date': datetime(2019, 1, 12),
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
'catchup': True
}
dag_name = 'udac_example_dag'
dag = DAG(dag_name,
default_args=default_args,
description='Load and transform data in Redshift with Airflow',
schedule_interval='0 * * * *',
max_active_runs = 1
)
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
create_tables_in_redshift = CreateTableOperator(
task_id = 'create_tables_in_redshift',
redshift_conn_id = 'redshift',
dag = dag
)
stage_events_to_redshift = StageToRedshiftOperator(
task_id='Stage_events',
table_name="staging_events",
s3_bucket = s3_bucket,
s3_key = log_s3_key,
file_format="JSON",
log_json_file = log_json_file,
redshift_conn_id = "redshift",
aws_credential_id="aws_credentials",
dag=dag,
provide_context=True
)
stage_songs_to_redshift = StageToRedshiftOperator(
task_id='Stage_songs',
table_name="staging_songs",
s3_bucket = s3_bucket,
s3_key = song_s3_key,
file_format="JSON",
redshift_conn_id = "redshift",
aws_credential_id="aws_credentials",
dag=dag,
provide_context=True
)
load_songplays_table = LoadFactOperator(
task_id='Load_songplays_fact_table',
redshift_conn_id = 'redshift',
sql_query = SqlQueries.songplay_table_insert,
dag=dag
)
load_user_dimension_table = SubDagOperator(
subdag=load_dimension_subdag(
parent_dag_name=dag_name,
task_id="Load_user_dim_table",
redshift_conn_id="redshift",
start_date=default_args['start_date'],
sql_statement=SqlQueries.user_table_insert,
delete_load = True,
table_name = "users",
),
task_id="Load_user_dim_table",
dag=dag,
)
load_song_dimension_table = SubDagOperator(
subdag=load_dimension_subdag(
parent_dag_name=dag_name,
task_id="Load_song_dim_table",
redshift_conn_id="redshift",
start_date=default_args['start_date'],
sql_statement=SqlQueries.song_table_insert,
delete_load = True,
table_name = "songs",
),
task_id="Load_song_dim_table",
dag=dag,
)
load_artist_dimension_table = SubDagOperator(
subdag=load_dimension_subdag(
parent_dag_name=dag_name,
task_id="Load_artist_dim_table",
redshift_conn_id="redshift",
start_date=default_args['start_date'],
sql_statement=SqlQueries.artist_table_insert,
delete_load = True,
table_name = "artists",
),
task_id="Load_artist_dim_table",
dag=dag,
)
load_time_dimension_table = SubDagOperator(
subdag=load_dimension_subdag(
parent_dag_name=dag_name,
task_id="Load_time_dim_table",
redshift_conn_id="redshift",
start_date=default_args['start_date'],
sql_statement=SqlQueries.time_table_insert,
delete_load = True,
table_name = "time",
),
task_id="Load_time_dim_table",
dag=dag,
)
run_quality_checks = DataQualityOperator(
task_id='Run_data_quality_checks',
dag=dag,
redshift_conn_id = "redshift",
tables = ["artists", "songplays", "songs", "time", "users"]
)
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
start_operator >> create_tables_in_redshift
create_tables_in_redshift >> [stage_songs_to_redshift, stage_events_to_redshift] >> load_songplays_table
load_songplays_table >> [load_user_dimension_table, load_song_dimension_table, load_artist_dimension_table, load_time_dimension_table] >> run_quality_checks >> end_operator
| 27.06962 | 172 | 0.657871 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | 0 | 0 | 0 |
|
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import configparser
# CONFIG
config = configparser.ConfigParser()
config.read('dwh.cfg')
# DROP TABLES
staging_events_table_drop = "DROP TABLE IF EXISTS staging_events;"
staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs;"
songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
user_table_drop = "DROP TABLE IF EXISTS users;"
song_table_drop = "DROP TABLE IF EXISTS songs;"
artist_table_drop = "DROP TABLE IF EXISTS artists;"
time_table_drop = "DROP TABLE IF EXISTS time;"
# CREATE TABLES
staging_events_table_create= ("""
CREATE TABLE IF NOT EXISTS staging_events
(
artist VARCHAR,
auth VARCHAR,
firstName VARCHAR(50),
gender CHAR,
itemInSession INTEGER,
lastName VARCHAR(50),
length FLOAT,
level VARCHAR,
location VARCHAR,
method VARCHAR,
page VARCHAR,
registration FLOAT,
sessionId INTEGER,
song VARCHAR,
status INTEGER,
ts BIGINT,
userAgent VARCHAR,
userId INTEGER
);
""")
staging_songs_table_create = ("""
CREATE TABLE IF NOT EXISTS staging_songs
(
num_songs INTEGER,
artist_id VARCHAR,
artist_latitude FLOAT,
artist_longitude FLOAT,
artist_location VARCHAR,
artist_name VARCHAR,
song_id VARCHAR,
title VARCHAR,
duration FLOAT,
year FLOAT
);
""")
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays
(
songplay_id INTEGER IDENTITY (1, 1) PRIMARY KEY ,
start_time TIMESTAMP,
user_id INTEGER,
level VARCHAR,
song_id VARCHAR,
artist_id VARCHAR,
session_id INTEGER,
location VARCHAR,
user_agent VARCHAR
)
DISTSTYLE KEY
DISTKEY ( start_time )
SORTKEY ( start_time );
""")
user_table_create = ("""
CREATE TABLE IF NOT EXISTS users
(
userId INTEGER PRIMARY KEY,
    firstname VARCHAR(50),
lastname VARCHAR(50),
gender CHAR(1) ENCODE BYTEDICT,
level VARCHAR ENCODE BYTEDICT
)
SORTKEY (userId);
""")
song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs
(
song_id VARCHAR PRIMARY KEY,
title VARCHAR,
artist_id VARCHAR,
year INTEGER ENCODE BYTEDICT,
duration FLOAT
)
SORTKEY (song_id);
""")
artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artists
(
artist_id VARCHAR PRIMARY KEY ,
name VARCHAR,
location VARCHAR,
latitude FLOAT,
longitude FLOAT
)
SORTKEY (artist_id);
""")
time_table_create = ("""
CREATE TABLE IF NOT EXISTS time
(
start_time TIMESTAMP PRIMARY KEY ,
hour INTEGER,
day INTEGER,
week INTEGER,
month INTEGER,
year INTEGER ENCODE BYTEDICT ,
weekday VARCHAR(9) ENCODE BYTEDICT
)
DISTSTYLE KEY
DISTKEY ( start_time )
SORTKEY (start_time);
""")
# STAGING TABLES
staging_events_copy = ("""
COPY staging_events
FROM {}
iam_role {}
FORMAT AS json {};
""").format(config['S3']['LOG_DATA'], config['IAM_ROLE']['ARN'], config['S3']['LOG_JSONPATH'])
staging_songs_copy = ("""
COPY staging_songs
FROM {}
iam_role {}
FORMAT AS json 'auto';
""").format(config['S3']['SONG_DATA'], config['IAM_ROLE']['ARN'])
# FINAL TABLES
songplay_table_insert = ("""
INSERT INTO songplays (START_TIME, USER_ID, LEVEL, SONG_ID, ARTIST_ID, SESSION_ID, LOCATION, USER_AGENT)
SELECT DISTINCT
TIMESTAMP 'epoch' + (se.ts / 1000) * INTERVAL '1 second' as start_time,
se.userId,
se.level,
ss.song_id,
ss.artist_id,
se.sessionId,
se.location,
se.userAgent
FROM staging_songs ss
INNER JOIN staging_events se
ON (ss.title = se.song AND se.artist = ss.artist_name)
AND se.page = 'NextSong';
""")
user_table_insert = ("""
INSERT INTO users
SELECT DISTINCT userId, firstName, lastName, gender, level
FROM staging_events
WHERE userId IS NOT NULL
AND page = 'NextSong';
""")
song_table_insert = ("""
INSERT INTO songs
SELECT
DISTINCT song_id, title, artist_id, year, duration
FROM staging_songs
WHERE song_id IS NOT NULL;
""")
artist_table_insert = ("""
INSERT INTO artists
SELECT
DISTINCT artist_id, artist_name, artist_location, artist_latitude, artist_longitude
FROM staging_songs;
""")
time_table_insert = ("""
insert into time
SELECT DISTINCT
TIMESTAMP 'epoch' + (ts/1000) * INTERVAL '1 second' as start_time,
EXTRACT(HOUR FROM start_time) AS hour,
EXTRACT(DAY FROM start_time) AS day,
EXTRACT(WEEKS FROM start_time) AS week,
EXTRACT(MONTH FROM start_time) AS month,
EXTRACT(YEAR FROM start_time) AS year,
to_char(start_time, 'Day') AS weekday
FROM staging_events;
""")
# QUERY LISTS
create_table_queries = [staging_events_table_create, staging_songs_table_create, songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
copy_table_queries = [staging_events_copy, staging_songs_copy]
insert_table_queries = [songplay_table_insert, user_table_insert, song_table_insert, artist_table_insert, time_table_insert]
| 23.429952 | 181 | 0.68038 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class CreateTableOperator(BaseOperator):
ui_color = '#358140'
@apply_defaults
def __init__(self, redshift_conn_id = "", *args, **kwargs):
super(CreateTableOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id = redshift_conn_id
def execute(self, context):
self.log.info('Creating Postgres SQL Hook')
redshift = PostgresHook(postgres_conn_id = self.redshift_conn_id)
self.log.info('Executing creating tables in Redshift.')
queries = open('/home/workspace/airflow/create_tables.sql', 'r').read()
redshift.run(queries)
self.log.info("Tables created ")
| 26.7 | 80 | 0.651807 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataQualityOperator(BaseOperator):
ui_color = '#89DA59'
@apply_defaults
def __init__(self,
redshift_conn_id="",
tables = [],
*args, **kwargs):
super(DataQualityOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id = redshift_conn_id
self.tables = tables
def execute(self, context):
redshift_hook = PostgresHook(postgres_conn_id = self.redshift_conn_id)
for table in self.tables:
self.log.info(f"Starting data quality validation on table : {table}")
records = redshift_hook.get_records(f"select count(*) from {table};")
if len(records) < 1 or len(records[0]) < 1 or records[0][0] < 1:
self.log.error(f"Data Quality validation failed for table : {table}.")
raise ValueError(f"Data Quality validation failed for table : {table}")
self.log.info(f"Data Quality Validation Passed on table : {table}!!!")
        self.log.info("Data quality checks passed on all tables.")
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class LoadDimensionOperator(BaseOperator):
ui_color = '#80BD9E'
@apply_defaults
def __init__(self,
redshift_conn_id="",
sql_query = "",
delete_load = False,
table_name = "",
*args, **kwargs):
super(LoadDimensionOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id = redshift_conn_id
self.sql_query = sql_query
self.table_name = table_name
self.delete_load = delete_load
def execute(self, context):
redshift_hook = PostgresHook(postgres_conn_id = self.redshift_conn_id)
if self.delete_load:
self.log.info(f"Delete load operation set to TRUE. Running delete statement on table {self.table_name}")
redshift_hook.run(f"DELETE FROM {self.table_name}")
self.log.info(f"Running query to load data into Dimension Table {self.table_name}")
redshift_hook.run(self.sql_query)
self.log.info(f"Dimension Table {self.table_name} loaded.")
| 36.65625 | 116 | 0.622924 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class LoadFactOperator(BaseOperator):
ui_color = '#F98866'
@apply_defaults
def __init__(self,
redshift_conn_id="",
sql_query = "",
*args, **kwargs):
super(LoadFactOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id = redshift_conn_id
self.sql_query = sql_query
def execute(self, context):
redshift_hook = PostgresHook(postgres_conn_id = self.redshift_conn_id)
redshift_hook.run(self.sql_query)
| 27.416667 | 78 | 0.621145 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.aws_hook import AwsHook
class StageToRedshiftOperator(BaseOperator):
ui_color = '#358140'
copy_query = " COPY {} \
FROM '{}' \
ACCESS_KEY_ID '{}' \
SECRET_ACCESS_KEY '{}' \
FORMAT AS json '{}'; \
"
@apply_defaults
def __init__(self,
redshift_conn_id="",
aws_credential_id="",
table_name = "",
s3_bucket="",
s3_key = "",
file_format = "",
log_json_file = "",
*args, **kwargs):
super(StageToRedshiftOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id = redshift_conn_id
self.aws_credential_id = aws_credential_id
self.table_name = table_name
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.file_format = file_format
self.log_json_file = log_json_file
self.execution_date = kwargs.get('execution_date')
def execute(self, context):
aws_hook = AwsHook(self.aws_credential_id)
credentials = aws_hook.get_credentials()
s3_path = "s3://{}/{}".format(self.s3_bucket, self.s3_key)
self.log.info(f"Picking staging file for table {self.table_name} from location : {s3_path}")
if self.log_json_file != "":
self.log_json_file = "s3://{}/{}".format(self.s3_bucket, self.log_json_file)
copy_query = self.copy_query.format(self.table_name, s3_path, credentials.access_key, credentials.secret_key, self.log_json_file)
else:
copy_query = self.copy_query.format(self.table_name, s3_path, credentials.access_key, credentials.secret_key, 'auto')
self.log.info(f"Running copy query : {copy_query}")
redshift_hook = PostgresHook(postgres_conn_id = self.redshift_conn_id)
redshift_hook.run(copy_query)
self.log.info(f"Table {self.table_name} staged successfully!!")
| 37.280702 | 141 | 0.577258 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import configparser
from pathlib import Path
config = configparser.ConfigParser()
config.read_file(open(f"{Path(__file__).parents[0]}/config.cfg"))
api_key = config['KEYS']['API_KEY']
headers = {'Authorization': 'Bearer %s' % api_key} | 28.625 | 65 | 0.711864 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | # This is request module of this project
from request import Request
from auth import headers
import json
class BusinessSearch:
def __init__(self, term, location, price=None):
self._param = {'term' : term, 'location' : location}
if price:
self._param['price'] = price
self._base_url = 'https://api.yelp.com/v3/businesses/search'
self._business_list = self._search_business()
def _search_business(self):
business_search_request = Request.get_content(url=self._base_url, param=self._param)
return business_search_request['businesses'] if business_search_request is not None else []
def _parse_results(self, data):
# Categories data : 'categories': [{'alias': 'bakeries', 'title': 'Bakeries'}]
categories = ' '.join([category['title'] for category in data['categories']])
# Longitude and latitude data : 'coordinates': {'latitude': 45.5232, 'longitude': -73.583459}
longitude = data['coordinates']['longitude']
latitude = data['coordinates']['latitude']
# Location example : 'location': { 'display_address': ['316 Avenue du Mont-Royal E', 'Montreal, QC H2T 1P7', 'Canada']}
location = ','.join(data['location']['display_address'])
return {"id" : data['id'], "name" : self._add_escape_character(data['name']), "image_url" : data['image_url'], "url" : data['url'],
"review_count" : data['review_count'], "categories" : categories, "rating" : data['rating'],
"latitude" : latitude, "longitude" : longitude, "price" : data['price'], "location" : location,
"display_phone" : data['display_phone']
}
def _add_escape_character(self, data):
return data.replace("'", "''")
def get_results(self):
return [self._parse_results(business) for business in self._business_list] | 47.512821 | 139 | 0.615547 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import psycopg2
import configparser
from pathlib import Path
from queries import create_business_schema, create_business_table
config = configparser.ConfigParser()
config.read_file(open(f"{Path(__file__).parents[0]}/config.cfg"))
class DatabaseDriver:
def __init__(self):
self._conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['DATABASE'].values()))
self._cur = self._conn.cursor()
def execute_query(self, query):
self._cur.execute(query)
def setup(self):
self.execute_query(create_business_schema)
self.execute_query(create_business_table) | 30.8 | 123 | 0.685039 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import configparser
from pathlib import Path
from businesssearch import BusinessSearch
from queries import create_business_schema, create_business_table, insert_business_table
from databasedriver import DatabaseDriver
import argparse
config = configparser.ConfigParser()
config.read_file(open(f"{Path(__file__).parents[0]}/config.cfg"))
parser = argparse.ArgumentParser(
description="A Example yelp business finder based on parameters such as term, location, price, ")
api_key = config['KEYS']['API_KEY']
headers = {'Authorization': 'Bearer %s' % api_key}
def to_string(data):
return [str(value) for value in data.values()]
def main():
args = parser.parse_args()
# Pricing levels to filter the search result with: 1 = $, 2 = $$, 3 = $$$, 4 = $$$$.
b = BusinessSearch(term=args.term, location=args.location, price=args.price)
db = DatabaseDriver()
db.setup()
queries = [insert_business_table.format(*to_string(result)) for result in b.get_results()]
query_to_execute = "BEGIN; \n" + '\n'.join(queries) + "\nCOMMIT;"
db.execute_query(query_to_execute)
if __name__ == "__main__":
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
required.add_argument("-t", "--term", metavar='', required=True,
help="Search term, for example \"food\" or \"restaurants\". The term may also be business names, such as \"Starbucks.\".")
required.add_argument("-l", "--location", metavar='', required=True,
help="This string indicates the geographic area to be used when searching for businesses. ")
optional.add_argument("-p", "--price", type=int, metavar='', required=False, default=1,
help="Pricing levels to filter the search result with: 1 = $, 2 = $$, 3 = $$$, 4 = $$$$.")
main() | 44.690476 | 148 | 0.657977 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | create_business_schema = """CREATE SCHEMA IF NOT EXISTS yelp;"""
create_business_table = """
CREATE TABLE IF NOT EXISTS yelp.business (
business_id varchar PRIMARY KEY,
business_name varchar,
image_url varchar,
url varchar,
review_count int,
categories varchar,
rating float,
latitude float,
longitude float,
price varchar,
location varchar,
phone varchar
);
"""
insert_business_table = """INSERT INTO yelp.business VALUES ('{}', '{}', '{}', '{}', {}, '{}', {}, {}, {}, '{}', '{}', '{}')
ON CONFLICT (business_id)
DO UPDATE SET
business_id = EXCLUDED.business_id,
business_name = EXCLUDED.business_name,
image_url = EXCLUDED.image_url,
url = EXCLUDED.url,
review_count = EXCLUDED.review_count,
categories = EXCLUDED.categories,
rating = EXCLUDED.rating,
latitude = EXCLUDED.latitude,
longitude = EXCLUDED.longitude,
price = EXCLUDED.price,
location = EXCLUDED.location,
phone = EXCLUDED.phone;
""" | 35.371429 | 124 | 0.505503 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import requests
from auth import headers
import json
class Request:
def __init__(self):
self._header = headers
@staticmethod
def get_content(url, param):
response = requests.get(url, headers=headers, params=param)
if response.status_code == 200:
return json.loads(response.content)
else:
print(f"Request completed with Error. Response Code : {response.status_code}")
return None | 27.875 | 90 | 0.635575 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import configparser
import psycopg2
from sql_queries import copy_table_queries, insert_table_queries
def load_staging_tables(cur, conn):
for query in copy_table_queries:
cur.execute(query)
conn.commit()
def insert_tables(cur, conn):
for query in insert_table_queries:
cur.execute(query)
conn.commit()
def main():
config = configparser.ConfigParser()
config.read('dwh.cfg')
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
cur = conn.cursor()
load_staging_tables(cur, conn)
insert_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main() | 20.625 | 112 | 0.638205 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import configparser
import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def drop_tables(cur, conn):
for query in drop_table_queries:
cur.execute(query)
conn.commit()
def create_tables(cur, conn):
for query in create_table_queries:
cur.execute(query)
conn.commit()
def main():
config = configparser.ConfigParser()
config.read('dwh.cfg')
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
cur = conn.cursor()
drop_tables(cur, conn)
create_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main() | 20 | 112 | 0.636364 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from create_tables import main as create_table_main
from etl import main as etl_main
if __name__ == "__main__":
create_table_main()
etl_main()
| 20.857143 | 51 | 0.664474 |
Dataset Card for "Customizable-Code-Assistant-Data"
Dataset Summary
This dataset is a dummy version of the Customizable Code Assistant Dataset.
Supported Tasks and Leaderboards
Customizable Code Assistant is a dataset for code completion. The task is to predict the next token in a code snippet. The dataset is designed to be customizable, so that it can be used for different programming languages and different code completion tasks.
[More Information Needed]
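As a minimal sketch of the intended next-token-prediction use, the snippet below loads the dataset with the Hugging Face datasets library and turns the text column of each row into (context, next-token) pairs. The Hub repo id and the GPT-2 tokenizer are placeholder assumptions for illustration; only the column names (language, text) come from the preview rows above.

from datasets import load_dataset
from transformers import AutoTokenizer

# Assumption: replace with the dataset's actual Hub repo id; it is not stated on this card.
DATASET_PATH = "your-org/Customizable-Code-Assistant-Data"

def build_next_token_pairs(example, tokenizer, context_len=64):
    # Tokenize one code sample and emit (context_ids, next_token_id) training pairs.
    ids = tokenizer(example["text"], truncation=True, max_length=1024)["input_ids"]
    pairs = []
    for i in range(1, len(ids)):
        context = ids[max(0, i - context_len):i]
        pairs.append({"context_ids": context, "next_token_id": ids[i]})
    return pairs

if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("gpt2")  # any code-capable tokenizer works here
    ds = load_dataset(DATASET_PATH, split="train")
    # Keep only Python rows, mirroring the `language` column shown in the preview rows above.
    python_rows = ds.filter(lambda row: row["language"] == "Python")
    sample_pairs = build_next_token_pairs(python_rows[0], tokenizer)
    print(f"Built {len(sample_pairs)} next-token pairs from the first Python sample")

A real training pipeline would stream the full split and batch these pairs, but this is enough to show how the text column maps onto the next-token-prediction task described above.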