repo_name | repo_url | repo_description | repo_stars | repo_forks | repo_last_updated | repo_created_at | repo_size | repo_license | language | text | avg_line_length | max_line_length | alphnanum_fraction
---|---|---|---|---|---|---|---|---|---|---|---|---|---
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from rest_framework import generics
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from users.serializers import UserSerializer, AuthTokenSerializer
class UserCreateView(generics.CreateAPIView):
serializer_class = UserSerializer
permission_classes = ()
class ObtainTokenView(ObtainAuthToken):
serializer_class = AuthTokenSerializer
permission_classes = ()
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
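# --- Hedged illustration (not part of the original module) ---------------------
# A minimal sketch of how these views might be wired into URL patterns; normally
# this would live in a separate urls.py. The "user" namespace and the
# "register"/"login" route names are assumptions based on the reverse() calls in
# this project's tests.
from django.urls import path

app_name = "user"

urlpatterns = [
    path("register/", UserCreateView.as_view(), name="register"),
    path("login/", ObtainTokenView.as_view(), name="login"),
]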
| 24.15 | 65 | 0.804781 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | import os
from elasticsearch_dsl import analyzer
from django_elasticsearch_dsl import Document, fields, Index
from news.models import News
news_index = Index(os.environ["ELASTIC_INDEX"])
news_index.settings(
number_of_shards=1,
number_of_replicas=1
)
html_strip = analyzer(
"html_strip",
tokenizer="standard",
filter=["lowercase", "stop", "snowball"],
char_filter=["html_strip"]
)
@news_index.doc_type
class NewsDocument(Document):
id = fields.TextField(attr="_id")
title = fields.TextField(
analyzer=html_strip,
fields={
"raw": fields.TextField(analyzer="keyword"),
}
)
link = fields.TextField()
published = fields.TextField(
fields={
"raw": fields.TextField(analyzer="keyword"),
},
fielddata=True
)
description = fields.TextField(
analyzer=html_strip,
fields={
"raw": fields.TextField(analyzer="keyword"),
}
)
author = fields.TextField(
fielddata=True
)
language = fields.TextField(
fielddata=True
)
class Django:
model = News
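# --- Hedged usage sketch (not part of the original module) ---------------------
# django_elasticsearch_dsl Document subclasses expose a search() helper that
# returns an elasticsearch_dsl Search object bound to the index, so a full-text
# query against the analyzed title field could look roughly like this.
def search_news_by_title(term):
    # the "match" query shape and the field choice are illustrative assumptions
    return NewsDocument.search().query("match", title=term).execute()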
| 20.545455 | 60 | 0.617399 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from elasticsearch.exceptions import NotFoundError
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
SEARCH_NEWS_URL = reverse("search:search-list")
SEARCH_DETAIL_NEWS_URL = reverse(
"search:search-detail", kwargs={"id":"missing_news"}
)
class PublicNewsApiTests(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required_news_list(self):
response = self.client.get(SEARCH_NEWS_URL)
self.assertEqual(
response.status_code, status.HTTP_401_UNAUTHORIZED
)
def test_login_required_news_detail(self):
response = self.client.get(SEARCH_DETAIL_NEWS_URL)
self.assertEqual(
response.status_code, status.HTTP_401_UNAUTHORIZED
)
class PrivateNewsApiTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
"[email protected]",
"testpass123"
)
self.client.force_authenticate(
self.user
)
def test_retrieve_news_list(self):
response = self.client.get(SEARCH_NEWS_URL)
self.assertEqual(
response.status_code, status.HTTP_200_OK
)
def test_retrieve_missing_news_detail(self):
with self.assertRaises(NotFoundError):
self.client.head(
SEARCH_DETAIL_NEWS_URL
)
| 24.666667 | 62 | 0.645874 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse("user:register")
GET_TOKEN_URL = reverse("user:login")
class UserApiTests(TestCase):
def setUp(self):
self.client = APIClient()
@staticmethod
def create_user(**kwargs):
return get_user_model().objects.create_user(
**kwargs
)
def test_create_valid_user(self):
attrs = {
"email": "[email protected]",
"password": "test123"
}
response = self.client.post(CREATE_USER_URL, attrs)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**response.data)
self.assertTrue(user.check_password(attrs["password"]))
self.assertNotIn("password", response.data)
def test_password_too_short(self):
attrs = {
"email": "[email protected]",
"password": "123"
}
response = self.client.post(CREATE_USER_URL, attrs)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST
)
def test_user_exists(self):
attrs = {
"email": "[email protected]",
"password": "123"
}
self.create_user(**attrs)
response = self.client.post(CREATE_USER_URL, attrs)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST
)
def test_create_token_for_user(self):
attrs = {
"email": "[email protected]",
"password": "123"
}
self.create_user(**attrs)
response = self.client.post(GET_TOKEN_URL, attrs)
self.assertIn("token", response.data)
self.assertEqual(
response.status_code, status.HTTP_200_OK
)
def test_create_token_invalid_credentials(self):
attrs = {
"email": "[email protected]",
"password": "12345"
}
self.create_user(email="[email protected]", password="wrong")
response = self.client.post(GET_TOKEN_URL, attrs)
self.assertNotIn("token", response.data)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST
)
def test_create_token_no_user(self):
attrs = {
"email": "[email protected]",
"password": "12345"
}
response = self.client.post(GET_TOKEN_URL, attrs)
self.assertNotIn("token", response.data)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST
)
| 28.858696 | 72 | 0.583394 |
DataEngineeringProject | https://github.com/damklis/DataEngineeringProject | Example end to end data engineering project. | 920 | 192 | 2023-12-04 19:31:15+00:00 | 2020-06-30 09:33:56+00:00 | 1,845 | MIT License | Python | from django.test import TestCase
from django.contrib.auth import get_user_model
class UserModelTests(TestCase):
email = "[email protected]"
email_upper = "[email protected]"
password = "testpassword"
def test_create_user_check_email(self):
user = get_user_model().objects.create_user(
email=self.email,
password=self.password
)
self.assertEqual(user.email, self.email)
def test_create_user_check_password(self):
user = get_user_model().objects.create_user(
email=self.email,
password=self.password
)
self.assertTrue(user.check_password(self.password))
def test_user_email_normalized(self):
user = get_user_model().objects.create_user(
email=self.email_upper,
password=self.password
)
self.assertEqual(user.email, self.email_upper.lower())
def test_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(
email=None,
password=self.password
)
def test_create_superuser(self):
user = get_user_model().objects.create_superuser(
email=self.email,
password=self.password
)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser)
| 28.659574 | 62 | 0.612347 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Just Enough Python for Databricks SQL
# MAGIC
# MAGIC While Databricks SQL provides an ANSI-compliant flavor of SQL with many additional custom methods (including the entire Delta Lake SQL syntax), users migrating from some systems may run into missing features, especially around control flow and error handling.
# MAGIC
# MAGIC Databricks notebooks allow users to write SQL and Python and execute logic cell-by-cell. PySpark has extensive support for executing SQL queries, and can easily exchange data with tables and temporary views.
# MAGIC
# MAGIC Mastering just a handful of Python concepts will unlock powerful new design practices for engineers and analysts proficient in SQL. Rather than trying to teach the entire language, this lesson focuses on those features that can immediately be leveraged to write more extensible SQL programs on Databricks.
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, students should be able to:
# MAGIC * Print and manipulate multi-line Python strings
# MAGIC * Define variables and functions
# MAGIC * Use f-strings for variable substitution
# COMMAND ----------
# MAGIC %md
# MAGIC ## Strings
# MAGIC Characters enclosed in single (`'`) or double (`"`) quotes are considered strings.
# COMMAND ----------
"This is a string"
# COMMAND ----------
# MAGIC %md
# MAGIC To preview how a string will render, we can call `print()`.
# COMMAND ----------
print("This is a string")
# COMMAND ----------
# MAGIC %md
# MAGIC By wrapping a string in triple quotes (`"""`), it's possible to use multiple lines.
# COMMAND ----------
print("""
This
is
a
multi-line
string
""")
# COMMAND ----------
# MAGIC %md
# MAGIC This makes it easy to turn SQL queries into Python strings.
# COMMAND ----------
print("""
SELECT *
FROM test_table
""")
# COMMAND ----------
# MAGIC %md
# MAGIC When we execute SQL from a Python cell, we will pass a string as an argument to `spark.sql()`.
# COMMAND ----------
spark.sql("SELECT 1 AS test")
# COMMAND ----------
# MAGIC %md
# MAGIC To render a query the way it would appear in a normal SQL notebook, we call `display()` on the result of this function call.
# COMMAND ----------
display(spark.sql("SELECT 1 AS test"))
# COMMAND ----------
# MAGIC %md
# MAGIC **NOTE**: Executing a cell with only a Python string in it will just print the string. Using `print()` with a string just renders it back to the notebook.
# MAGIC
# MAGIC To execute a string that contains SQL using Python, it must be passed within a call to `spark.sql()`.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Variables
# MAGIC Python variables are assigned using the `=` operator.
# MAGIC
# MAGIC Python variable names need to start with a letter or underscore, and can only contain letters, numbers, and underscores.
# MAGIC
# MAGIC Many Python programmers favor snake casing, which uses only lowercase letters and underscores for all variables.
# MAGIC
# MAGIC The cell below creates the variable `my_string`.
# COMMAND ----------
my_string = "This is a string"
# COMMAND ----------
# MAGIC %md
# MAGIC Executing a cell with this variable will return its value.
# COMMAND ----------
my_string
# COMMAND ----------
# MAGIC %md
# MAGIC The output here is the same as if we typed `"This is a string"` into the cell and ran it.
# MAGIC
# MAGIC Note that the quotation marks aren't part of the string, as shown when we print it.
# COMMAND ----------
print(my_string)
# COMMAND ----------
# MAGIC %md
# MAGIC This variable can be used the same way a string would be.
# MAGIC
# MAGIC String concatenation (joining two strings together) can be performed with a `+`.
# COMMAND ----------
print("This is a new string and " + my_string)
# COMMAND ----------
# MAGIC %md
# MAGIC We can join string variables with other string variables.
# COMMAND ----------
new_string = "This is a new string and "
print(new_string + my_string)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Functions
# MAGIC Functions allow you to specify local variables as arguments and then apply custom logic. We define a function using the keyword `def` followed by the function name and, enclosed in parentheses, any variable arguments we wish to pass into the function. Finally, the function header has a `:` at the end.
# MAGIC
# MAGIC Note: In Python, indentation matters. You can see in the cell below that the logic of the function is indented in from the left margin. Any code that is indented to this level is part of the function.
# MAGIC
# MAGIC The function below takes one argument (`arg`) and then prints it.
# COMMAND ----------
def print_string(arg):
print(arg)
# COMMAND ----------
# MAGIC %md
# MAGIC When we pass a string as the argument, it will be printed.
# COMMAND ----------
print_string("foo")
# COMMAND ----------
# MAGIC %md
# MAGIC We can also pass a variable as an argument.
# COMMAND ----------
print_string(my_string)
# COMMAND ----------
# MAGIC %md
# MAGIC Oftentimes we want to return the results of our function for use elsewhere. For this we use the `return` keyword.
# MAGIC
# MAGIC The function below constructs a new string by concatenating our argument. Note that both functions and arguments can have arbitrary names, just like variables (and follow the same rules).
# COMMAND ----------
def return_new_string(string_arg):
return "The string passed to this function was " + string_arg
# COMMAND ----------
# MAGIC %md
# MAGIC Running this function returns the output.
# COMMAND ----------
return_new_string("foobar")
# COMMAND ----------
# MAGIC %md
# MAGIC Assigning it to a variable captures the output for reuse elsewhere.
# COMMAND ----------
function_output = return_new_string("foobar")
# COMMAND ----------
# MAGIC %md
# MAGIC This variable doesn't contain our function, just the results of our function (a string).
# COMMAND ----------
function_output
# COMMAND ----------
# MAGIC %md
# MAGIC ## F-strings
# MAGIC By adding the letter `f` before a Python string, you can inject variables or evaluated Python code by inserting them inside curly braces (`{}`).
# MAGIC
# MAGIC Evaluate the cell below to see string variable substitution.
# COMMAND ----------
f"I can substitute {my_string} here"
# COMMAND ----------
# MAGIC %md
# MAGIC The following cell inserts the string returned by a function.
# COMMAND ----------
f"I can substitute functions like {return_new_string('foobar')} here"
# COMMAND ----------
# MAGIC %md
# MAGIC Combine this with triple quotes and you can format a paragraph or list, like below.
# COMMAND ----------
multi_line_string = f"""
I can have many lines of text with variable substitution:
- A variable: {my_string}
- A function output: {return_new_string('foobar')}
"""
print(multi_line_string)
# COMMAND ----------
# MAGIC %md
# MAGIC Or you could format a SQL query.
# COMMAND ----------
table_name = "users"
filter_clause = "WHERE state = 'CA'"
query = f"""
SELECT *
FROM {table_name}
{filter_clause}
"""
print(query)
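# COMMAND ----------
# MAGIC %md
# MAGIC As a hedged illustration (an added example, not part of the original lesson) of
# MAGIC tying this back to `spark.sql()`: the same f-string pattern works for a query
# MAGIC that does not depend on any existing table, so the cell below substitutes a
# MAGIC variable into a literal `SELECT` and displays the result.
# COMMAND ----------
value_to_select = 1
display(spark.sql(f"SELECT {value_to_select} AS test"))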
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 26.276224 | 313 | 0.683205 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Just Enough Python for Databricks SQL
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, students should be able to:
# MAGIC * Leverage `if/else`
# MAGIC * Describe how errors impact notebook execution
# MAGIC * Write simple tests with `assert`
# MAGIC * Use `try/except` to handle errors
# COMMAND ----------
# MAGIC %md
# MAGIC ## `if/else`
# MAGIC
# MAGIC `if/else` clauses are common in many programming languages.
# MAGIC
# MAGIC Note that SQL has the `CASE WHEN ... ELSE` construct, which is similar.
# MAGIC
# MAGIC **If you're seeking to evaluate conditions within your tables or queries, use `CASE WHEN`.** Python control flow should be reserved for evaluating conditions outside of your query.
# MAGIC
# MAGIC More on this later. First, an example with `"beans"`.
# COMMAND ----------
food = "beans"
# COMMAND ----------
# MAGIC %md
# MAGIC Working with `if` and `else` is all about evaluating whether or not certain conditions are true in your execution environment.
# MAGIC
# MAGIC Note that in Python, we have the following comparison operators:
# MAGIC
# MAGIC | Syntax | Operation |
# MAGIC | --- | --- |
# MAGIC | `==` | equals |
# MAGIC | `>` | greater than |
# MAGIC | `<` | less than |
# MAGIC | `>=` | greater than or equal |
# MAGIC | `<=` | less than or equal |
# MAGIC | `!=` | not equal |
# MAGIC
# MAGIC If you read the sentence below out loud, you will be describing the control flow of your program.
# COMMAND ----------
if food == "beans":
print(f"I love {food}")
else:
print(f"I don't eat {food}")
# COMMAND ----------
# MAGIC %md
# MAGIC As expected, because the variable `food` is the string literal `"beans"`, the `if` condition evaluated to `True` and the first print statement executed.
# MAGIC
# MAGIC Let's assign a different value to the variable.
# COMMAND ----------
food = "beef"
# COMMAND ----------
# MAGIC %md
# MAGIC Now the first condition will evaluate as `False`. What do you think will happen when you run the following cell?
# COMMAND ----------
if food == "beans":
print(f"I love {food}")
else:
print(f"I don't eat {food}")
# COMMAND ----------
# MAGIC %md
# MAGIC Note that each time we assign a new value to a variable, this completely overwrites the old value.
# COMMAND ----------
food = "potatoes"
print(food)
# COMMAND ----------
# MAGIC %md
# MAGIC The Python keyword `elif` (short for `else if`) allows us to evaluate multiple conditions.
# MAGIC
# MAGIC Note that conditions are evaluated from top to bottom. Once a condition evaluates to true, no further conditions will be evaluated.
# MAGIC
# MAGIC `if/else` control flow patterns:
# MAGIC 1. Must contain an `if` clause
# MAGIC 1. Can contain any number of `elif` clauses
# MAGIC 1. Can contain at most one `else` clause
# COMMAND ----------
if food == "beans":
print(f"I love {food}")
elif food == "potatoes":
print(f"My favorite vegetable is {food}")
elif food != "beef":
print(f"Do you have any good recipes for {food}?")
else:
print(f"I don't eat {food}")
# COMMAND ----------
# MAGIC %md
# MAGIC By encapsulating the above logic in a function, we can reuse this logic and formatting with arbitrary arguments rather than referencing globally-defined variables.
# COMMAND ----------
def foods_i_like(food):
if food == "beans":
print(f"I love {food}")
elif food == "potatoes":
print(f"My favorite vegetable is {food}")
elif food != "beef":
print(f"Do you have any good recipes for {food}?")
else:
print(f"I don't eat {food}")
# COMMAND ----------
# MAGIC %md
# MAGIC Here, we pass the string `"bread"` to the function.
# COMMAND ----------
foods_i_like("bread")
# COMMAND ----------
# MAGIC %md
# MAGIC As we evaluate the function, we locally assign the string `"bread"` to the `food` variable, and the logic behaves as expected.
# MAGIC
# MAGIC Note that we don't overwrite the value of the `food` variable as previously defined in the notebook.
# COMMAND ----------
food
# COMMAND ----------
# MAGIC %md
# MAGIC ## try/except
# MAGIC
# MAGIC While `if/else` clauses allow us to define conditional logic based on evaluating conditional statements, `try/except` focuses on providing robust error handling.
# MAGIC
# MAGIC Let's begin by considering a simple function.
# COMMAND ----------
def three_times(number):
return number * 3
# COMMAND ----------
# MAGIC %md
# MAGIC Let's assume that the desired use of this function is to multiply an integer value by 3.
# MAGIC
# MAGIC The below cell demonstrates this behavior.
# COMMAND ----------
three_times(2)
# COMMAND ----------
# MAGIC %md
# MAGIC Note what happens if a string is passed to the function.
# COMMAND ----------
three_times("2")
# COMMAND ----------
# MAGIC %md
# MAGIC In this case, we don't get an error, but we also do not get the desired outcome.
# MAGIC
# MAGIC `assert` statements allow us to run simple tests of Python code. If an `assert` statement evaluates to true, nothing happens. If it evaluates to false, an error is raised.
# MAGIC
# MAGIC Run the two cells below to assert the types of `2` and `"2"`.
# COMMAND ----------
assert type(2) == int
# COMMAND ----------
assert type("2") == int
# COMMAND ----------
# MAGIC %md
# MAGIC As expected, the string `"2"` does not evaluate as an integer.
# MAGIC
# MAGIC Python strings have a property to report whether or not they can be safely cast as numeric values.
# COMMAND ----------
assert "2".isnumeric()
# COMMAND ----------
# MAGIC %md
# MAGIC String numbers are common; you may see them as results from an API query, raw records in a JSON or CSV file, or returned by a SQL query.
# MAGIC
# MAGIC `int()` and `float()` are two common methods for casting values to numeric types. An `int` will always be a whole number, while a `float` will always have a decimal.
# COMMAND ----------
int("2")
# COMMAND ----------
# MAGIC %md
# MAGIC While Python will gladly cast a string containing numeric characters to a numeric type, it will not allow you to change other strings to numbers.
# COMMAND ----------
int("two")
# COMMAND ----------
# MAGIC %md
# MAGIC Note that errors will stop the execution of a notebook script; all cells after an error will be skipped when a notebook is scheduled as a production job.
# MAGIC
# MAGIC If we enclose code that might throw an error in a `try` statement, we can define alternate logic when an error is encountered.
# MAGIC
# MAGIC Below is a simple function that demonstrates this.
# COMMAND ----------
def try_int(num_string):
try:
return int(num_string)
except:
print(f"{num_string} is not a number!")
# COMMAND ----------
# MAGIC %md
# MAGIC When a numeric string is passed, the function will return the result as an integer.
# COMMAND ----------
try_int("2")
# COMMAND ----------
# MAGIC %md
# MAGIC When a non-numeric string is passed, an informative message is printed out.
# MAGIC
# MAGIC **NOTE**: An error is **not** raised, even though an error occurred, and no value was returned. Implementing logic that suppresses errors can lead to logic silently failing.
# COMMAND ----------
try_int("two")
# COMMAND ----------
# MAGIC %md
# MAGIC Below, our earlier function is updated to include logic for handling errors to return an informative message.
# COMMAND ----------
def three_times(number):
try:
return int(number) * 3
except ValueError as e:
print(f"""
You passed the string variable '{number}'.
The result of using this function would be to return the string '{number * 3}'.
Try passing an integer instead.
""")
# COMMAND ----------
# MAGIC %md
# MAGIC Now our function can process numbers passed as strings.
# COMMAND ----------
three_times("2")
# COMMAND ----------
# MAGIC %md
# MAGIC And prints an informative message when a string is passed.
# COMMAND ----------
three_times("two")
# COMMAND ----------
# MAGIC %md
# MAGIC Note that, as implemented, this logic is only useful for interactive execution: the message isn't currently being logged anywhere, the code will not return the data in the desired format, and human intervention would be required to act upon the printed message.
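# COMMAND ----------
# MAGIC %md
# MAGIC A hedged sketch of one possible refinement (an added illustration, not part of
# MAGIC the original lesson): re-raising a `ValueError` with an informative message
# MAGIC keeps the notebook's fail-fast behavior for scheduled jobs while still
# MAGIC explaining the problem.
# COMMAND ----------
def three_times_strict(number):
    try:
        return int(number) * 3
    except ValueError:
        # fail loudly instead of printing, so a scheduled job stops here
        raise ValueError(f"three_times_strict expected a numeric value, got {number!r}")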
# COMMAND ----------
# MAGIC %md
# MAGIC ## Applying Python Control Flow for SQL Queries
# MAGIC
# MAGIC While the above examples demonstrate the basic principles of using these designs in Python, the goal of this lesson is to learn how to apply these concepts to executing SQL logic on Databricks.
# MAGIC
# MAGIC Let's revisit converting a SQL cell to execute in Python.
# MAGIC
# MAGIC **NOTE**: The following setup script ensures an isolated execution environment.
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMP VIEW demo_tmp_vw(id, name, value) AS VALUES
# MAGIC (1, "Yve", 1.0),
# MAGIC (2, "Omar", 2.5),
# MAGIC (3, "Elia", 3.3);
# COMMAND ----------
# MAGIC %md
# MAGIC Run the SQL cell below to preview the contents of this temp view.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM demo_tmp_vw
# COMMAND ----------
# MAGIC %md
# MAGIC Running SQL in a Python cell simply requires passing the string query to `spark.sql()`.
# COMMAND ----------
query = "SELECT * FROM demo_tmp_vw"
spark.sql(query)
# COMMAND ----------
# MAGIC %md
# MAGIC But recall that executing a query with `spark.sql()` returns the results as a DataFrame rather than displaying them; below, the code is augmented to capture the result and display it.
# COMMAND ----------
query = "SELECT * FROM demo_tmp_vw"
result = spark.sql(query)
display(result)
# COMMAND ----------
# MAGIC %md
# MAGIC Using a simple `if` clause with a function allows us to execute arbitrary SQL queries, optionally displaying the results, and always returning the resultant DataFrame.
# COMMAND ----------
def simple_query_function(query, preview=True):
query_result = spark.sql(query)
if preview:
display(query_result)
return query_result
# COMMAND ----------
result = simple_query_function(query)
# COMMAND ----------
# MAGIC %md
# MAGIC Below, we execute a different query and set preview to `False`, as the purpose of the query is to create a temp view rather than return a preview of data.
# COMMAND ----------
new_query = "CREATE OR REPLACE TEMP VIEW id_name_tmp_vw AS SELECT id, name FROM demo_tmp_vw"
simple_query_function(new_query, preview=False)
# COMMAND ----------
# MAGIC %md
# MAGIC We now have a simple extensible function that could be further parameterized depending on the needs of our organization.
# MAGIC
# MAGIC For example, suppose we want to protect our company from malicious SQL, like the query below.
# COMMAND ----------
injection_query = "SELECT * FROM demo_tmp_vw; DROP DATABASE prod_db CASCADE; SELECT * FROM demo_tmp_vw"
# COMMAND ----------
# MAGIC %md
# MAGIC Below, we define a simple search for a semi-colon in the text, then use an assert statement with `try/except` to raise a custom error message.
# COMMAND ----------
def injection_check(query):
semicolon_index = query.find(";")
try:
assert semicolon_index < 0, f"Query contains semi-colon at index {semicolon_index}\nBlocking execution to avoid SQL injection attack"
except AssertionError as e:
print(query)
raise e
# COMMAND ----------
# MAGIC %md
# MAGIC **NOTE**: The example shown here is not sophisticated, but seeks to demonstrate a general principle. Always be wary of allowing untrusted users to pass text that will be passed to SQL queries. Also note that only one query can be executed using `spark.sql()`, so text with a semi-colon will always throw an error.
# COMMAND ----------
injection_check(injection_query)
# COMMAND ----------
# MAGIC %md
# MAGIC If we add this method to our earlier query function, we now have a more robust function that will assess each query for potential threats before execution.
# COMMAND ----------
def secure_query_function(query, preview=True):
injection_check(query)
query_result = spark.sql(query)
if preview:
display(query_result)
return query_result
# COMMAND ----------
# MAGIC %md
# MAGIC As expected, we see normal performance with a safe query.
# COMMAND ----------
secure_query_function(query)
# COMMAND ----------
# MAGIC %md
# MAGIC But execution is prevented when bad logic is run, as reconstructed in the cell below.
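# COMMAND ----------
# Hedged reconstruction of the demonstration the cell above refers to: passing
# the earlier injection_query should print the query and raise an AssertionError
# before spark.sql() ever runs.
secure_query_function(injection_query)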
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 27.69697 | 321 | 0.670061 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Just Enough Python for Databricks SQL Lab
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, students should be able to:
# MAGIC * Review basic Python code and describe expected outcomes of code execution
# MAGIC * Reason through control flow statements in Python functions
# MAGIC * Add parameters to a SQL query by wrapping it in a Python function
# COMMAND ----------
# MAGIC %md
# MAGIC # Reviewing Python Basics
# MAGIC
# MAGIC In the previous notebook, we briefly explored using `spark.sql()` to execute arbitrary SQL commands from Python.
# MAGIC
# MAGIC Look at the following 3 cells. Before executing each cell, identify:
# MAGIC 1. The expected output of cell execution
# MAGIC 1. What logic is being executed
# MAGIC 1. Changes to the resultant state of the environment
# MAGIC
# MAGIC Then execute the cells, compare the results to your expectations, and see the explanations below.
# COMMAND ----------
course = "python_for_sql"
# COMMAND ----------
spark.sql(f"SELECT '{course}'")
# COMMAND ----------
display(spark.sql(f"SELECT '{course}'"))
# COMMAND ----------
# MAGIC %md
# MAGIC 1. `Cmd 3` assigns a string to a variable. When a variable assignment is successful, no output is displayed to the notebook. A new variable is added to the current execution environment.
# MAGIC 1. `Cmd 4` executes a SQL query and returns the results as a DataFrame. In this case, the SQL query is just to select a string, so no changes to our environment occur. When a returned DataFrame is not captured, the schema for the DataFrame is displayed alongside the word `DataFrame`.
# MAGIC 1. `Cmd 5` executes the same SQL query and displays the returned DataFrame. This combination of `display()` and `spark.sql()` most closely mirrors executing logic in a `%sql` cell; the results will always be printed in a formatted table, assuming results are returned by the query; some queries will instead manipulate tables or databases, in which case the word `OK` will print to show successful execution. In this case, no changes to our environment occur from running this code.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Setting Up a Development Environment
# MAGIC
# MAGIC Throughout this course, we use logic similar to the following cell to capture information about the user currently executing the notebook and create an isolated development database.
# MAGIC
# MAGIC The `re` library is the [standard Python library for regex](https://docs.python.org/3/library/re.html).
# MAGIC
# MAGIC Databricks SQL has a special method to capture the username of the `current_user()`; and the `.first()[0]` code is a quick hack to capture the first row of the first column of a query executed with `spark.sql()` (in this case, we do this safely knowing that there will only be 1 row and 1 column).
# MAGIC
# MAGIC All other logic below is just string formatting.
# COMMAND ----------
import re
username = spark.sql("SELECT current_user()").first()[0]
userhome = f"dbfs:/user/{username}/{course}"
database = f"""{course}_{re.sub("[^a-zA-Z0-9]", "_", username)}_db"""
print(f"""
username: {username}
userhome: {userhome}
database: {database}
""")
# COMMAND ----------
# MAGIC %md
# MAGIC Below, we add a simple control flow statement to this logic to create and use this user-specific database. Optionally, we will reset this database and drop all of the contents on repeat execution. (Note that the default mode is `"reset"`.)
# COMMAND ----------
def create_database(course, mode="reset"):
import re
username = spark.sql("SELECT current_user()").first()[0]
userhome = f"dbfs:/user/{username}/{course}"
database = f"""{course}_{re.sub("[^a-zA-Z0-9]", "_", username)}_db"""
print(f"""
username: {username}
userhome: {userhome}
database: {database}
""")
if mode == "reset":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
spark.sql(f"""
CREATE DATABASE IF NOT EXISTS {database}
LOCATION '{userhome}'
""")
spark.sql(f"USE {database}")
create_database(course)
# COMMAND ----------
# MAGIC %md
# MAGIC While this logic as defined is geared toward isolating students in shared workspaces for instructional purposes, the same basic design could be leveraged for testing new logic in an isolated environment before pushing to production.
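# COMMAND ----------
# Hedged usage example (not part of the original lab): passing any mode other
# than "reset" (the value "keep" here is arbitrary) skips the DROP DATABASE and
# file-removal branch in create_database above, so repeated runs reuse the
# existing database and its contents.
create_database(course, mode="keep")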
# COMMAND ----------
# MAGIC %md
# MAGIC ## Handling Errors Gracefully
# MAGIC
# MAGIC Review the logic in the function below.
# MAGIC
# MAGIC Note that we've just declared a new database that currently contains no tables.
# COMMAND ----------
def query_or_make_demo_table(table):
try:
display(spark.sql(f"SELECT * FROM {table}"))
except:
spark.sql(f"""
CREATE TABLE {table}
(id INT, name STRING, value DOUBLE, state STRING)
""")
spark.sql(f"""
INSERT INTO {table}
VALUES (1, "Yve", 1.0, "CA"),
(2, "Omar", 2.5, "NY"),
(3, "Elia", 3.3, "OH"),
(4, "Rebecca", 4.7, "TX"),
(5, "Ameena", 5.3, "CA"),
(6, "Ling", 6.6, "NY"),
(7, "Pedro", 7.1, "KY")
""")
display(spark.sql(f"SELECT * FROM {table}"))
# COMMAND ----------
# MAGIC %md
# MAGIC Try to identify the following before executing the next cell:
# MAGIC 1. The expected output of cell execution
# MAGIC 1. What logic is being executed
# MAGIC 1. Changes to the resultant state of the environment
# COMMAND ----------
query_or_make_demo_table("demo_table")
# COMMAND ----------
# MAGIC %md
# MAGIC Now answer the same three questions before running the same query below.
# COMMAND ----------
query_or_make_demo_table("demo_table")
# COMMAND ----------
# MAGIC %md
# MAGIC - On the first execution, the table `demo_table` did not yet exist. As such, the attempt to return the contents of the table created an error, which resulted in our `except` block of logic executing. This block:
# MAGIC 1. Created the table
# MAGIC 1. Inserted values
# MAGIC 1. Returned the contents of the table
# MAGIC - On the second execution, the table `demo_table` already exists, and so the first query in the `try` block executes without error. As a result, we just display the results of the query without modifying anything in our environment.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Adapting SQL to Python
# MAGIC Let's consider the following SQL query against our demo table created above.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT id, value
# MAGIC FROM demo_table
# MAGIC WHERE state = "CA"
# COMMAND ----------
# MAGIC %md
# MAGIC Let's use this simple example to practice creating a Python function that adds optional functionality.
# MAGIC
# MAGIC Our target function will:
# MAGIC * Always return only the `id` and `value` columns from a table named `demo_table`
# MAGIC * Allow filtering results by state, but default to all states
# MAGIC * Optionally return the query result object (a PySpark DataFrame)
# MAGIC
# MAGIC Stretch Goal:
# MAGIC * Add logic to check that the value passed for the `state` filter contains two uppercase letters
# MAGIC
# MAGIC Some starter logic has been provided below.
# COMMAND ----------
# ANSWER
def preview_values(state=None, return_results=False):
query = "SELECT id, value FROM demo_table"
if state is not None:
# assert state == state.upper() and len(state) == 2, "Please use standard 2 letter uppercase state abbreviations"
query += f" WHERE state = '{state}'"
query_results = spark.sql(query)
display(query_results)
if return_results == True:
return query_results
# COMMAND ----------
# MAGIC %md
# MAGIC The assert statements below can be used to check whether or not your function works as intended.
# COMMAND ----------
import pyspark.sql.dataframe
assert preview_values(return_results=True).columns == ["id", "value"], "Query should only return `id` and `value` columns"
assert preview_values() == None, "Function should not return anything by default"
assert type(preview_values(return_results=True)) == pyspark.sql.dataframe.DataFrame, "Function should optionally return the DataFrame results"
assert preview_values(state="OH", return_results=True).first()[0] == 3, "Function should allow filtering by state"
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 37.485356 | 490 | 0.684897 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Structured Streaming Concepts
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Describe the programming model used by Spark Structured Streaming
# MAGIC * Configure required options to perform a streaming read on a source
# MAGIC * Describe the requirements for end-to-end fault tolerance
# MAGIC * Configure required options to perform a streaming write to a sink
# MAGIC * Interact with streaming queries and stop active streams
# MAGIC
# MAGIC ## Datasets Used
# MAGIC The source contains smartphone accelerometer samples from devices and users with the following columns:
# MAGIC
# MAGIC | Field | Description |
# MAGIC | ------------- | ----------- |
# MAGIC | Arrival_Time | time data was received |
# MAGIC | Creation_Time | event time |
# MAGIC | Device | type of Model |
# MAGIC | Index | unique identifier of event |
# MAGIC | Model | i.e., Nexus4 |
# MAGIC | User | unique user identifier |
# MAGIC | geolocation | city & country |
# MAGIC | gt | transportation mode |
# MAGIC | id | unused null field |
# MAGIC | x | acceleration in x-dir |
# MAGIC | y | acceleration in y-dir |
# MAGIC | z | acceleration in z-dir |
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Getting Started
# MAGIC
# MAGIC Run the following cell to configure our "classroom."
# COMMAND ----------
# MAGIC %run ../Includes/classic-setup $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Micro-Batches as a Table
# MAGIC
# MAGIC For more information, see the analogous section in the [Structured Streaming Programming Guide](http://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#basic-concepts) (from which several images have been borrowed).
# MAGIC
# MAGIC Spark Structured Streaming approaches streaming data by modeling it as a series of continuous appends to an unbounded table. While similar to defining **micro-batch** logic, this model allows incremental queries to be defined against streaming sources as if they were static input (though the fact that the input is an unbounded table does impose some constraints).
# MAGIC
# MAGIC <img src="http://spark.apache.org/docs/latest/img/structured-streaming-stream-as-a-table.png"/>
# MAGIC
# MAGIC ### Basic Concepts
# MAGIC
# MAGIC - The developer defines an **input table** by configuring a streaming read against a **source**. The syntax for doing this is similar to working with static data.
# MAGIC - A **query** is defined against the input table. Both the DataFrames API and Spark SQL can be used to easily define transformations and actions against the input table.
# MAGIC - This logical query on the input table generates the **results table**. The results table contains the incremental state information of the stream.
# MAGIC - The **output** of a streaming pipeline will persist updates to the results table by writing to an external **sink**. Generally, a sink will be a durable system such as files or a pub/sub messaging bus.
# MAGIC - New rows are appended to the input table for each **trigger interval**. These new rows are essentially analogous to micro-batch transactions and will be automatically propagated through the results table to the sink.
# MAGIC
# MAGIC <img src="http://spark.apache.org/docs/latest/img/structured-streaming-model.png"/>
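# COMMAND ----------
# MAGIC %md
# MAGIC A hedged, minimal sketch of the pattern described above (an added illustration;
# MAGIC the source path, checkpoint path, and table name are placeholders, not values
# MAGIC from this course): a streaming read defines the input table, a transformation
# MAGIC defines the results table, and a streaming write persists updates to a sink.
# COMMAND ----------
def sketch_micro_batch_pipeline(source_path, checkpoint_path):
    # input table: an incremental read against a replayable source
    raw_df = spark.readStream.format("delta").load(source_path)
    # results table: an incremental query defined against the input table
    counts_df = raw_df.groupBy("device").count()
    # sink: persist updates to a durable target on each trigger interval
    return (counts_df.writeStream
            .option("checkpointLocation", checkpoint_path)
            .outputMode("complete")
            .table("device_counts_sketch"))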
# COMMAND ----------
# MAGIC %md
# MAGIC ## End-to-end Fault Tolerance
# MAGIC
# MAGIC Structured Streaming ensures end-to-end exactly-once fault-tolerance guarantees through _checkpointing_ (discussed below) and <a href="https://en.wikipedia.org/wiki/Write-ahead_logging" target="_blank">Write Ahead Logs</a>.
# MAGIC
# MAGIC Structured Streaming sources, sinks, and the underlying execution engine work together to track the progress of stream processing. If a failure occurs, the streaming engine attempts to restart and/or reprocess the data.
# MAGIC For best practices on recovering from a failed streaming query see <a href="https://docs.databricks.com/spark/latest/structured-streaming/production.html#recover-from-query-failures" target="_blank">docs</a>.
# MAGIC
# MAGIC This approach _only_ works if the streaming source is replayable; replayable sources include cloud-based object storage and pub/sub messaging services.
# MAGIC
# MAGIC At a high level, the underlying streaming mechanism relies on a couple approaches:
# MAGIC
# MAGIC * First, Structured Streaming uses checkpointing and write-ahead logs to record the offset range of data being processed during each trigger interval.
# MAGIC * Next, the streaming sinks are designed to be _idempotent_—that is, multiple writes of the same data (as identified by the offset) do _not_ result in duplicates being written to the sink.
# MAGIC
# MAGIC Taken together, replayable data sources and idempotent sinks allow Structured Streaming to ensure **end-to-end, exactly-once semantics** under any failure condition.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Reading a Stream
# MAGIC
# MAGIC The `spark.readStream` property returns a `DataStreamReader` used to configure and query the stream.
# MAGIC
# MAGIC Configuring a streaming read on a source requires:
# MAGIC * The schema of the data
# MAGIC * **NOTE**: Some streaming sources allow for schema inference
# MAGIC * The `format` of the source <a href="https://docs.databricks.com/spark/latest/structured-streaming/data-sources.html" target="_blank">file format or named connector</a>
# MAGIC * **NOTE**: `delta` is the default format for all reads and writes in Databricks
# MAGIC * Additional source-specific configuration options. For example:
# MAGIC * [`cloudFiles`](https://docs.databricks.com/spark/latest/structured-streaming/auto-loader-s3.html)
# MAGIC * <a href="https://docs.databricks.com/spark/latest/structured-streaming/kafka.html" target="_blank">Kafka</a>
# MAGIC * The name of the source table or the location of the files in object storage
# MAGIC
# MAGIC Below, we define a streaming read against a source (represented by `dataSource`) consisting of files from cloud storage.
# MAGIC
# MAGIC **NOTE**: We can think of this `DataStreamReader` as an incremental temp view defined against an ever-appending source table. Just as with a temp view, we only store the query plan when we set up an incremental read. It's not until we query results that we'll see compute happen.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ### The Schema
# MAGIC
# MAGIC Working with `cloudFiles` allows Databricks to automatically infer the schema from most file sources.
# MAGIC
# MAGIC Once data is loaded into a Delta Lake table, the schema for all downstream incremental reads will be grabbed automatically from the table metadata.
# MAGIC
# MAGIC Here, we'll provide an explicit schema for our data.
# COMMAND ----------
schema = """Arrival_Time BIGINT,
Creation_Time BIGINT,
Device STRING,
Index BIGINT,
Model STRING,
User STRING,
geolocation STRUCT<
city: STRING,
country: STRING>,
gt STRING,
Id BIGINT,
x DOUBLE,
y DOUBLE,
z DOUBLE"""
# COMMAND ----------
# MAGIC %md
# MAGIC ## Creating a Streaming Temp View
# MAGIC
# MAGIC Below we pull all of the above concepts together to define a streaming read.
# MAGIC
# MAGIC If we were continuing to build out our query with PySpark, we would capture this as a DataFrame. Instead, we use `createOrReplaceTempView` to create an entity that can be queried locally with SQL.
# COMMAND ----------
(spark
.readStream
.schema(schema)
.format("cloudFiles")
.option("cloudFiles.format", "json")
.load(dataSource)
.createOrReplaceTempView("streaming_tmp_vw")
)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Comparing to Static Reads
# MAGIC
# MAGIC The above logic provides us with more or less the same result as the static query below.
# COMMAND ----------
spark.sql(f"CREATE OR REPLACE TEMP VIEW static_tmp_vw AS SELECT * FROM json.`{dataSource}`")
# COMMAND ----------
# MAGIC %md
# MAGIC When we query a static read on data, we display the results of the query at a point in time.
# MAGIC
# MAGIC **NOTE**: The `display(spark.table())` pattern shown in the next cell is the same as executing a `SELECT * FROM` for a table or view. Later, we'll see that this allows us to pass streaming temp views back to the DataFrame API to write out a stream.
# COMMAND ----------
display(spark.table("static_tmp_vw"))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC But when we execute a query on a streaming temporary view, we'll continue to update the results of the query as new data arrives in the source.
# MAGIC
# MAGIC Think of a query executed against a streaming temp view as an **always-on incremental query**.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM streaming_tmp_vw
# COMMAND ----------
# MAGIC %md
# MAGIC Before continuing, click `Stop Execution` at the top of the notebook, `Cancel` immediately under the cell, or run the following cell to stop all active streaming queries.
# COMMAND ----------
for s in spark.streams.active:
print("Stopping " + s.id)
s.stop()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Working with Streaming Data
# MAGIC We can execute most transformations against streaming temp views the same way we would with static data. Here, we'll run a simple aggregation to get counts of records for each `device`.
# MAGIC
# MAGIC Because we are querying a streaming temp view, this becomes a streaming query that executes indefinitely, rather than completing after retrieving a single set of results. For streaming queries like this, Databricks Notebooks include interactive dashboards that allow users to monitor streaming performance. Explore this below.
# MAGIC
# MAGIC ![](https://files.training.databricks.com/images/adbcore/streaming-dashboard.png)
# MAGIC
# MAGIC One important note regarding this example: this is merely displaying an aggregation of input as seen by the stream. **None of these records are being persisted anywhere at this point.**
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT device, COUNT(device) AS total_records
# MAGIC FROM streaming_tmp_vw
# MAGIC GROUP BY device
# COMMAND ----------
# MAGIC %md
# MAGIC Before continuing, click `Stop Execution` at the top of the notebook, `Cancel` immediately under the cell, or run the following cell to stop all active streaming queries.
# COMMAND ----------
for s in spark.streams.active:
print("Stopping " + s.id)
s.stop()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Persisting Streaming Results
# MAGIC
# MAGIC In order to persist incremental results, we need to pass our logic back to the PySpark Structured Streaming DataFrames API.
# MAGIC
# MAGIC Above, we created a temp view from a PySpark streaming DataFrame. If we create another temp view from the results of a query against a streaming temp view, we'll again have a streaming temp view.
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMP VIEW device_counts_tmp_vw AS (
# MAGIC SELECT device, COUNT(device) AS total_records
# MAGIC FROM streaming_tmp_vw
# MAGIC GROUP BY device
# MAGIC )
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Writing a Stream
# MAGIC
# MAGIC To persist the results of a streaming query, we must write them out to durable storage. The `DataFrame.writeStream` method returns a `DataStreamWriter` used to configure the output.
# MAGIC
# MAGIC There are a number of required parameters to configure a streaming write:
# MAGIC * The `format` of the **output sink**; see <a href="https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#output-sinks" target="_blank">documentation</a>
# MAGIC * The location of the **checkpoint directory**
# MAGIC * The **output mode**
# MAGIC * Configurations specific to the output sink, such as:
# MAGIC * <a href="https://docs.databricks.com/spark/latest/structured-streaming/kafka.html" target="_blank">Kafka</a>
# MAGIC * A <a href="https://spark.apache.org/docs/latest/api/python/pyspark.sql.html?highlight=foreach#pyspark.sql.streaming.DataStreamWriter.foreach"target="_blank">custom sink</a> via `writeStream.foreach(...)`
# MAGIC
# MAGIC Once the configuration is completed, we trigger the job with a call to `.table()`. If we didn't want to create a table and instead wanted to write directly to storage, we would use `.start()` instead.
# COMMAND ----------
# MAGIC %md
# MAGIC ### Checkpointing
# MAGIC
# MAGIC Databricks creates checkpoints by storing the current state of your streaming job to cloud storage.
# MAGIC
# MAGIC Checkpointing combines with write ahead logs to allow a terminated stream to be restarted and continue from where it left off.
# MAGIC
# MAGIC Checkpoints cannot be shared between separate streams.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Output Modes
# MAGIC
# MAGIC Streaming jobs have output modes similar to static/batch workloads. [More details here](https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#output-modes).
# MAGIC
# MAGIC | Mode | Example | Notes |
# MAGIC | ------------- | ----------- | --- |
# MAGIC | **Append** | `.outputMode("append")` | Only the new rows appended to the Result Table since the last trigger are written to the sink. This is the default. |
# MAGIC | **Complete** | `.outputMode("complete")` | The entire updated Result Table is written to the sink. The individual sink implementation decides how to handle writing the entire table. |
# MAGIC | **Update** | `.outputMode("update")` | Only the rows in the Result Table that were updated since the last trigger will be outputted to the sink.|
# MAGIC
# MAGIC **NOTE**: Not all sinks will support `update` mode.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ### Defining the Trigger Interval
# MAGIC
# MAGIC When defining a streaming write, the `trigger` method specifies when the system should process the next set of data.
# MAGIC
# MAGIC | Trigger Type | Example | Notes |
# MAGIC |----------------------------------------|-----------|-------------|
# MAGIC | Unspecified | | The query will be executed as soon as the system has completed processing the previous query (this is the default) |
# MAGIC | Fixed interval micro-batches | `.trigger(processingTime="2 minutes")` | The query will be executed in micro-batches and kicked off at the user-specified intervals |
# MAGIC | One-time micro-batch | `.trigger(once=True)` | The query will execute _only one_ micro-batch to process all the available data and then stop on its own |
# MAGIC | Continuous w/fixed checkpoint interval | `.trigger(continuous="1 second")` | The query will be executed in a low-latency, <a href="http://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#continuous-processing" target = "_blank">continuous processing mode</a>. _EXPERIMENTAL_ |
# MAGIC
# MAGIC Note that triggers are specified when defining how data will be written to a sink and control the frequency of micro-batches. By default, Spark will automatically detect and process all data in the source that has been added since the last trigger; some sources allow configuration to limit the size of each micro-batch.
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/icon_best_24.png"/> Read <a href="https://databricks.com/blog/2017/05/22/running-streaming-jobs-day-10x-cost-savings.html" target="_blank">this blog post</a> to learn more about using one-time triggers to simplify CDC with a hybrid streaming/batch design.
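# COMMAND ----------
# MAGIC %md
# MAGIC A hedged sketch (an added illustration; the view, table, and checkpoint names
# MAGIC are placeholders) of the one-time trigger pattern mentioned above: the same
# MAGIC kind of streaming write, but processing all available data in a single
# MAGIC micro-batch and then stopping on its own. Note that it would need its own
# MAGIC checkpoint directory, since checkpoints cannot be shared between streams.
# COMMAND ----------
def write_once(source_view, target_table, checkpoint_path):
    # one-time micro-batch: process everything currently available, then stop
    return (spark.table(source_view)
            .writeStream
            .option("checkpointLocation", checkpoint_path)
            .outputMode("complete")
            .trigger(once=True)
            .table(target_table))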
# COMMAND ----------
# MAGIC %md
# MAGIC ## Pulling It All Together
# MAGIC
# MAGIC The code below demonstrates using `spark.table()` to load data from a streaming temp view back to a DataFrame. Note that Spark will always load streaming views as a streaming DataFrame and static views as static DataFrames (meaning that incremental processing must be defined with read logic to support incremental writing).
# COMMAND ----------
streamingQuery = (spark.table("device_counts_tmp_vw")
.writeStream
.option("checkpointLocation", checkpointPath)
.outputMode("complete")
.trigger(processingTime='10 seconds')
.table("device_counts")
)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Querying the Output
# MAGIC Now let's query the output we've written from SQL. Because the result is a table, we only need to deserialize the data to return the results.
# MAGIC
# MAGIC Because we are now querying a table (not a streaming DataFrame), the following will **not** be a streaming query.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT *
# MAGIC FROM device_counts
# COMMAND ----------
# MAGIC %md
# MAGIC ## Debugging with the Memory Sink
# MAGIC
# MAGIC The **memory** sink can be a useful tool for debugging. It provides a quick and easy sink requiring no setup. The output is stored as an in-memory table, with a name defined using `queryName`.
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/icon_warn_24.png"/> This should be used only for debugging purposes with low data volumes, since the entire output is collected and stored in the driver’s memory.
# COMMAND ----------
streamingQueryMem = (spark.table("streaming_tmp_vw")
.writeStream
.format("memory") # memory = store in-memory table (for testing only)
.queryName("streaming_query_mem") # name of the in-memory table
.outputMode("append")
.start()
)
# COMMAND ----------
# MAGIC %md
# MAGIC Let's examine the contents of the in-memory table with the same query used previously. Like the previous query we ran against the Delta output, this will **not** be a streaming query. In this case, we are simply querying the in-memory table established by the memory sink in the previous cell.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT device, COUNT(device) AS total_records
# MAGIC FROM streaming_query_mem
# MAGIC GROUP BY device
# COMMAND ----------
# MAGIC %md
# MAGIC ## Interacting with Streaming Queries
# MAGIC
# MAGIC
# MAGIC In the logic defined above, data is read from JSON files and then saved out in the Delta Lake format. Note that because Delta creates a new version for each transaction, when working with streaming data this will mean that the Delta table creates a new version for each trigger interval in which new data is processed. [More info on streaming with Delta](https://docs.databricks.com/delta/delta-streaming.html#table-streaming-reads-and-writes).
# COMMAND ----------
# MAGIC %md
# MAGIC The `recentProgress` attribute allows access to metadata about recently processed micro-batches. Let's dump the contents for the streaming query created earlier.
# COMMAND ----------
streamingQuery.recentProgress
# COMMAND ----------
# MAGIC %md
# MAGIC In addition to referencing `StreamingQuery` objects returned by `writeStream`, as we did above, we can iterate on the `streams.active` attribute in `SparkSession` to identify all active streaming queries.
# COMMAND ----------
for s in spark.streams.active: # Iterate over all streams
print(s.id) # Print the stream's id
# COMMAND ----------
# MAGIC %md
# MAGIC Let's iterate over all active streams and stop them. This is important: if you don't stop them, these streaming queries will keep running indefinitely and keep your cluster busy!
# MAGIC
# MAGIC After running the following cell, feel free to examine the cells earlier that initiated streaming queries; notice they have both been canceled.
# COMMAND ----------
for s in spark.streams.active:
print("Stopping " + s.id)
s.stop()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Incremental Ingestion with Auto Loader
# MAGIC
# MAGIC Incremental ETL is important since it allows us to deal solely with new data that has been encountered since the last ingestion. Reliably processing only the new data is key to achieving scalability.
# MAGIC
# MAGIC Ingesting into a Delta Lake table from a data lake is a common use case that has traditionally been challenging to properly set up, typically relying on the integration of always-on services like Kafka to track the files that have been ingested, and to monitor cloud storage for new file arrivals. Databricks Auto Loader abstracts all this and provides an easy-to-use mechanism for incrementally and efficiently processing new data files as they arrive in cloud file storage, in the form of a structured streaming source.
# MAGIC
# MAGIC Given an input directory path on the cloud file storage, the `cloudFiles` source automatically processes new files as they arrive, with the option of also processing existing files in that directory. For full details, refer to the <a href="https://docs.databricks.com/spark/latest/structured-streaming/auto-loader.html" target="_blank">documentation</a>.
# MAGIC
# MAGIC **Due to the benefits and scalability that Auto Loader delivers, Databricks recommends its use as general best practice when ingesting data from cloud storage.**
# COMMAND ----------
# MAGIC %md
# MAGIC Reset the output directory in preparation to stream data using Auto Loader.
# COMMAND ----------
# MAGIC %run ../Includes/classic-setup $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC ### Reading Data with Auto Loader
# MAGIC
# MAGIC An example invocation of Auto Loader is provided below. Comparing against the standard streaming read from earlier, notice the following differences:
# MAGIC
# MAGIC * Specify a `format` of `cloudFiles`
# MAGIC * Specify the underlying format of the data using the `cloudFiles.format` option
# MAGIC * The `dataLandingLocation` source below represents a cloud storage location from which data is being ingested
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ### Schema Inference and Evolution
# MAGIC
# MAGIC As mentioned earlier, every streaming DataFrame must have a schema. But Auto Loader can be configured to take a more active role in inferring and maintaining the schema of the data as it evolves.
# MAGIC
# MAGIC By omitting a schema specification, Auto Loader will infer the schema based on the data seen in the input. Specifying the `cloudFiles.schemaLocation` option allows Auto Loader to track the schema, thereby improving performance and ensuring stability of the schema across stream restarts. A common pattern is to use `checkpointLocation` for this purpose.
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/icon_warn_24.png"/> There must be data present for schema inference to work; otherwise you must specify a schema.
# MAGIC
# MAGIC **Schema evolution** allows changes to a schema in response to data that changes over time. This can be an important feature in some use cases.
# COMMAND ----------
incrementalStreamingDF = (spark
.readStream
.format("cloudFiles")
.option("cloudFiles.format", "json")
.option("cloudFiles.schemaLocation", checkpointPath)
.load(dataLandingLocation)
)
# COMMAND ----------
# MAGIC %md
# MAGIC Writing the output also takes a similar form as the previous streaming case. Note the following differences:
# MAGIC * Specify the `mergeSchema` option to activate schema evolution. If any changes to the schema occur over time, the schema is adapted rather than rejecting the write. This can be useful in some use cases.
# MAGIC * Omitting the trigger to allow the query to continue running, ingesting new data as it arrives. If you wish to schedule your ETL process to run in batch mode, consider using a one-time trigger instead.
# COMMAND ----------
(incrementalStreamingDF
.writeStream
.format("delta")
.option("checkpointLocation", checkpointPath)
.option("mergeSchema", "true")
.table(outputTable)
)
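# COMMAND ----------
# MAGIC %md
# MAGIC For reference, the batch-style alternative mentioned above swaps the always-on stream for a one-time trigger. The sketch below is illustrative only and left commented out, since running it alongside the continuous query above would contend for the same checkpoint.
# COMMAND ----------
# Illustrative sketch only: an incremental batch version of the write above using a one-time trigger.
# (incrementalStreamingDF
#     .writeStream
#     .format("delta")
#     .option("checkpointLocation", checkpointPath)
#     .option("mergeSchema", "true")
#     .trigger(once=True)
#     .table(outputTable)
# )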
# COMMAND ----------
# MAGIC %md
# MAGIC ### Querying the Output
# MAGIC By now the following query against the output table will likely seem familiar. Run it a few times, and it will become apparent that nothing changes, as no data is arriving in our simulated cloud storage.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT Device,COUNT(Device) AS Count
# MAGIC FROM ${c.outputTable}
# MAGIC GROUP BY Device
# COMMAND ----------
# MAGIC %md
# MAGIC ## Land New Data
# MAGIC Run the following cell to simulate the arrival of new data in our cloud storage. Each time you execute the cell below, a new file will be written to our source directory. Following this cell, observe the stream monitor above, and notice the impact on the results when re-running the query.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Clean Up
# MAGIC Stop active streams and remove created resources before continuing.
# COMMAND ----------
# MAGIC %run ../Includes/classic-setup $mode="clean"
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC <h2><img src="https://files.training.databricks.com/images/105/logo_spark_tiny.png"> Summary</h2>
# MAGIC
# MAGIC We used `readStream` to stream input from a variety of sources, including Databricks Auto Loader. Auto Loader augments Structured Streaming functionality by providing an easy-to-use interface for performing incremental ETL from cloud storage.
# MAGIC
# MAGIC We also explored various options for consuming, writing and querying the streamed input data.
# MAGIC
# MAGIC Finally, we explored the array of active streams maintained in the `SparkSession` object.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC <h2><img src="https://files.training.databricks.com/images/105/logo_spark_tiny.png"> Additional Topics & Resources</h2>
# MAGIC
# MAGIC * <a href="https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#" target="_blank">Structured Streaming Programming Guide</a><br>
# MAGIC * <a href="https://www.youtube.com/watch?v=rl8dIzTpxrI" target="_blank">A Deep Dive into Structured Streaming</a> by Tathagata Das. This is an excellent video describing how Structured Streaming works.
# MAGIC * <a href="https://docs.databricks.com/spark/latest/structured-streaming/production.html#id2" target="_blank">Failed Streaming Query Recovery</a> Best Practices for Recovery.
# MAGIC * <a href="https://databricks.com/blog/2018/03/20/low-latency-continuous-processing-mode-in-structured-streaming-in-apache-spark-2-3-0.html" target="_blank">Continuous Processing Mode</a> Lowest possible latency stream processing. Currently Experimental.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 48.116071 | 529 | 0.72135 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Windows and Watermarks
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Explain why some methods will not work on streaming data
# MAGIC * Use windows to aggregate over chunks of data rather than all data
# MAGIC * Compare tumbling windows and sliding windows
# MAGIC * Apply watermarking to manage state
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Getting Started
# MAGIC
# MAGIC Run the following cell to configure our "classroom."
# COMMAND ----------
# MAGIC %run ../Includes/classic-setup $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Configure Streaming Read
# MAGIC
# MAGIC This lesson uses the same data as the previous notebook, again loaded with Auto Loader.
# MAGIC
# MAGIC The code below registers a streaming DataFrame (which we'll use again in a moment) and a streaming temp view.
# MAGIC
# MAGIC Note the use of the `selectExpr` method, which allows multiple SQL operations to be configured on a per column basis in PySpark DataFrames. Here, we're simplifying the data to be dealt with by selecting only two columns:
# MAGIC * `Creation_Time`, originally encoded in nanoseconds, is converted to unixtime and renamed to `creation_time`
# MAGIC * `gt` is renamed to `action`
# COMMAND ----------
from pyspark.sql.functions import col
schema = """Arrival_Time BIGINT,
Creation_Time BIGINT,
Device STRING,
Index BIGINT,
Model STRING,
User STRING,
geolocation STRUCT<
city: STRING,
country: STRING>,
gt STRING,
Id BIGINT,
x DOUBLE,
y DOUBLE,
z DOUBLE"""
streamingDF = (spark
.readStream
.format("cloudFiles")
.option("cloudFiles.format", "json")
.schema(schema)
.load(dataLandingLocation)
.selectExpr("cast(Creation_Time/1E9 AS timestamp) AS creation_time", "gt AS action")
)
streamingDF.createOrReplaceTempView("streaming_tmp_vw")
# COMMAND ----------
# MAGIC %md
# MAGIC ### Unsupported Operations
# MAGIC
# MAGIC Most operations on a streaming DataFrame are identical to a static DataFrame. There are <a href="https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#unsupported-operations" target="_blank">some exceptions to this</a>.
# MAGIC
# MAGIC Consider the model of the data as a constantly appending table. Sorting is one of a handful of operations that is either too complex or logically not possible to do when working with streaming data.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM streaming_tmp_vw
# MAGIC ORDER BY creation_time DESC
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Streaming Aggregations
# MAGIC
# MAGIC Continuous applications often require near real-time decisions based on real-time aggregated statistics.
# MAGIC
# MAGIC Some examples include
# MAGIC * Aggregating errors in data from IoT devices by type
# MAGIC * Detecting anomalous behavior in a server's log file by aggregating by country
# MAGIC * Performing behavior analysis on instant messages via hash tags
# MAGIC
# MAGIC While these streaming aggregates may need to reference historic trends, analytics will generally be calculated over discrete units of time. Spark Structured Streaming supports time-based **windows** on streaming DataFrames to make these calculations easy.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ### What is Time?
# MAGIC
# MAGIC Multiple times may be associated with each streaming event. Consider the discrete differences between the time at which the event data was:
# MAGIC - Generated
# MAGIC - Written to the streaming source
# MAGIC - Processed into Spark
# MAGIC
# MAGIC Each of these times will be recorded from the system clock of the machine running the process, with discrepancies and latencies being introduced due to many different causes.
# MAGIC
# MAGIC Generally speaking, most analytics will be interested in the time the data was generated. As such, this lesson will focus on timestamps recorded at the time of data generation, which we will refer to as the **event time**.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Windowing
# MAGIC
# MAGIC Defining windows on a time series field imposes a time range constraint on an otherwise unbounded input. This allows users to utilize this field for aggregations in the same way they would use distinct values when calling `GROUP BY`. Spark maintains a state table with aggregates for each user-defined bucket of time.
# MAGIC
# MAGIC Spark supports three types of windows:
# MAGIC
# MAGIC * **Tumbling windows**: fixed-size windows, regularly recurring windows that do not overlap. Each event will be aggregated into only one window.
# MAGIC * **Sliding windows**: fixed-size windows, regularly recurring windows that overlap. Each event may be aggregated into multiple windows.
# MAGIC * **Session windows**: dynamic windows whose start time and duration depend on the inputs. An event triggers the start of a window that will, in general, remain open until a predetermined gap of inactivity has elapsed after the most recent event received.
# MAGIC
# MAGIC <img src="https://spark.apache.org/docs/latest/img/structured-streaming-time-window-types.jpg">
# COMMAND ----------
# MAGIC %md
# MAGIC The following diagram illustrates in greater detail the concept of sliding windows and how events received at various times will be aggregated into the various windows (assuming that the slide duration is less than the window duration, which leads to overlapping windows):
# MAGIC
# MAGIC <img src="https://spark.apache.org/docs/latest/img/structured-streaming-window.png"/>
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ### Consuming with a Windowed Aggregation
# MAGIC
# MAGIC Let's consume the stream from SQL in a windowed aggregation using the SQL `window` function, which accepts a timestamp column and a window duration to define the tumbling windows. An optional third argument specifies a slide duration that allows the definition of a sliding window.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT
# MAGIC window.start AS start,
# MAGIC action,
# MAGIC count(action) AS count
# MAGIC FROM streaming_tmp_vw
# MAGIC GROUP BY
# MAGIC window(creation_time, '1 hour'),
# MAGIC action
# MAGIC ORDER BY
# MAGIC start,
# MAGIC action
# COMMAND ----------
# MAGIC %md
# MAGIC Once a batch of data has loaded, render the results as a bar graph with the following settings:
# MAGIC
# MAGIC * **Keys** is set to `start`
# MAGIC * **Series groupings** is set to `action`
# MAGIC * **Values** is set to `count`
# COMMAND ----------
# MAGIC %md
# MAGIC ### Land New Data
# MAGIC Recall that our stream has been set up for incremental ingestion. Invoke the following cell a few times to simulate the arrival of new data. Note the impact on the results reported above.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Performance Considerations
# MAGIC Because aggregation is a <a href="https://databricks.com/glossary/what-are-transformations" target="_blank">wide transformation</a>, it will trigger a shuffle. Configuring the number of partitions can reduce the number of tasks and properly balance the workload for the cluster.
# MAGIC
# MAGIC In most cases, a 1-to-1 mapping of partitions to cores is ideal for streaming applications. The code below sets the number of partitions to 4, which maps perfectly to a cluster with 4 cores.
# COMMAND ----------
spark.conf.set("spark.sql.shuffle.partitions", 4)
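# COMMAND ----------
# MAGIC %md
# MAGIC If you are unsure how many cores are available, `defaultParallelism` (which generally reflects the total number of cores available to the cluster) offers a quick check; the value of 4 above assumes a 4-core cluster.
# COMMAND ----------
# A reasonable starting point for spark.sql.shuffle.partitions in streaming jobs
# is the total number of cores available to the cluster.
print(spark.sparkContext.defaultParallelism)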
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Watermarking
# MAGIC
# MAGIC
# MAGIC When aggregating with an unbounded input, Spark's fault-tolerant state management naturally incurs some processing overhead. To keep these overheads bounded within acceptable limits, the size of the state data should not grow indefinitely. However, with sliding windows, the number of windows/groups will grow indefinitely, and so can the size of state (proportional to the number of groups). To bound the state size, we have to be able to drop old aggregates that are not going to be updated anymore. We achieve this using **watermarking**.
# MAGIC
# MAGIC Watermarking allows users to define a cutoff threshold for how much state should be maintained. This cutoff is calculated against the most recently seen event time. Data arriving after this threshold will be discarded.
# MAGIC
# MAGIC The <a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrame.withWatermark.html" target="_blank">`withWatermark`</a> method allows users to easily define this cutoff threshold.
# MAGIC
# MAGIC Note that there is no built-in support for watermarking in Spark SQL, but we can define this in PySpark before creating a temp view, as shown below.
# COMMAND ----------
(streamingDF
.withWatermark("creation_time", "2 hours") # Specify a 2-hour watermark
.createOrReplaceTempView("watermarked_tmp_vw")
)
# COMMAND ----------
# MAGIC %md
# MAGIC By directing our windowed aggregation at this new temp view, we can easily achieve the same outcome while managing state information.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT
# MAGIC window.start AS start,
# MAGIC action,
# MAGIC count(action) AS count
# MAGIC FROM watermarked_tmp_vw
# MAGIC GROUP BY
# MAGIC window(creation_time, '1 hour'),
# MAGIC action
# MAGIC ORDER BY
# MAGIC start,
# MAGIC action
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC
# MAGIC ## Example Details
# MAGIC
# MAGIC The threshold is always calculated against the max event time seen.
# MAGIC
# MAGIC In the example above,
# MAGIC * The in-memory state is limited to two hours of historic data.
# MAGIC * Data arriving more than 2 hours late may be dropped.
# MAGIC * Data received within 2 hours of being generated will never be dropped.
# MAGIC
# MAGIC <img alt="Caution" title="Caution" style="vertical-align: text-bottom; position: relative; height:1.3em; top:0.0em" src="https://files.training.databricks.com/static/images/icon-warning.svg"/> This guarantee is strict in only one direction. Data delayed by more than 2 hours is not guaranteed to be dropped; it may or may not get aggregated. The more delayed the data is, the less likely the engine is going to process it.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Writing Results
# MAGIC
# MAGIC Previously we used `spark.table()` to pass SQL logic stored in temp views back to a DataFrame to write out streaming results.
# MAGIC
# MAGIC Below, we instead use `spark.sql()` and pass the entire SQL query.
# COMMAND ----------
(spark.sql("""
SELECT
window.start AS start,
action,
count(action) AS count
FROM watermarked_tmp_vw
GROUP BY
window(creation_time, '1 hour'),
action
ORDER BY
start,
action
""").writeStream
.option("checkpointLocation", checkpointPath)
.outputMode("complete")
.table("action_counts")
)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Clean Up
# COMMAND ----------
# MAGIC %run ../Includes/classic-setup $mode="clean"
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Summary
# MAGIC
# MAGIC * A handful of operations valid for static DataFrames will not work with streaming data
# MAGIC * Windows allow users to define time-based buckets for aggregating streaming data
# MAGIC * Watermarking allows users to manage the amount of state being calculated with each trigger and define how late-arriving data should be handled
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 38.604502 | 549 | 0.725317 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Incremental Multi-Hop in the Lakehouse
# MAGIC
# MAGIC Now that we have a better understanding of how to work with incremental data processing by combining Structured Streaming APIs and Spark SQL, we can explore the tight integration between Structured Streaming and Delta Lake.
# MAGIC
# MAGIC
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Describe Bronze, Silver, and Gold tables
# MAGIC * Create a Delta Lake multi-hop pipeline
# COMMAND ----------
# MAGIC %md
# MAGIC ## Incremental Updates in the Lakehouse
# MAGIC
# MAGIC Delta Lake allows users to easily combine streaming and batch workloads in a unified multi-hop pipeline. Each stage of the pipeline represents a state of our data valuable to driving core use cases within the business. Because all data and metadata lives in object storage in the cloud, multiple users and applications can access data in near-real time, allowing analysts to access the freshest data as it's being processed.
# MAGIC
# MAGIC ![](https://files.training.databricks.com/images/sslh/multi-hop-simple.png)
# MAGIC
# MAGIC - **Bronze** tables contain raw data ingested from various sources (JSON files, RDBMS data, IoT data, to name a few examples).
# MAGIC
# MAGIC - **Silver** tables provide a more refined view of our data. We can join fields from various bronze tables to enrich streaming records, or update account statuses based on recent activity.
# MAGIC
# MAGIC - **Gold** tables provide business level aggregates often used for reporting and dashboarding. This would include aggregations such as daily active website users, weekly sales per store, or gross revenue per quarter by department.
# MAGIC
# MAGIC The end outputs are actionable insights, dashboards and reports of business metrics.
# MAGIC
# MAGIC By considering our business logic at all steps of the ETL pipeline, we can ensure that storage and compute costs are optimized by reducing unnecessary duplication of data and limiting ad hoc querying against full historic data.
# MAGIC
# MAGIC Each stage can be configured as a batch or streaming job, and ACID transactions ensure that we succeed or fail completely.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Datasets Used
# MAGIC
# MAGIC This demo uses simplified artificially generated medical data. The schema of our two datasets is represented below. Note that we will be manipulating these schema during various steps.
# MAGIC
# MAGIC #### Recordings
# MAGIC The main dataset uses heart rate recordings from medical devices delivered in the JSON format.
# MAGIC
# MAGIC | Field | Type |
# MAGIC | --- | --- |
# MAGIC | device_id | int |
# MAGIC | mrn | long |
# MAGIC | time | double |
# MAGIC | heartrate | double |
# MAGIC
# MAGIC #### PII
# MAGIC These data will later be joined with a static table of patient information stored in an external system to identify patients by name.
# MAGIC
# MAGIC | Field | Type |
# MAGIC | --- | --- |
# MAGIC | mrn | long |
# MAGIC | name | string |
# COMMAND ----------
# MAGIC %md
# MAGIC ## Getting Started
# MAGIC
# MAGIC Run the following cell to configure the lab environment.
# COMMAND ----------
# MAGIC %run "../Includes/multi-hop-setup" $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Data Simulator
# MAGIC Databricks Auto Loader can automatically process files as they land in your cloud object stores. To simulate this process, you will be asked to run the following operation several times throughout the course.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Bronze Table: Ingesting Raw JSON Recordings
# MAGIC
# MAGIC Below, we configure a read on a raw JSON source using Auto Loader with schema inference.
# MAGIC
# MAGIC Note that while you need to use the Spark DataFrame API to set up an incremental read, once configured you can immediately register a temp view to leverage Spark SQL for streaming transformations on your data.
# MAGIC
# MAGIC **NOTE**: For a JSON data source, Auto Loader will default to inferring each column as a string. Here, we demonstrate specifying the data type for the `time` column using the `cloudFiles.schemaHints` option. Note that specifying improper types for a field will result in null values.
# COMMAND ----------
(spark.readStream
.format("cloudFiles")
.option("cloudFiles.format", "json")
.option("cloudFiles.schemaHints", "time DOUBLE")
.option("cloudFiles.schemaLocation", bronzeCheckpoint)
.load(dataLandingLocation)
.createOrReplaceTempView("recordings_raw_temp"))
# COMMAND ----------
# MAGIC %md
# MAGIC Here, we'll enrich our raw data with additional metadata describing the source file and the time it was ingested. This additional metadata can be ignored during downstream processing while providing useful information for troubleshooting errors if corrupt data is encountered.
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMPORARY VIEW recordings_bronze_temp AS (
# MAGIC SELECT *, current_timestamp() receipt_time, input_file_name() source_file
# MAGIC FROM recordings_raw_temp
# MAGIC )
# COMMAND ----------
# MAGIC %md
# MAGIC The code below passes our enriched raw data back to PySpark API to process an incremental write to a Delta Lake table.
# COMMAND ----------
(spark.table("recordings_bronze_temp")
.writeStream
.format("delta")
.option("checkpointLocation", bronzeCheckpoint)
.outputMode("append")
.table("bronze"))
# COMMAND ----------
# MAGIC %md
# MAGIC Trigger another file arrival with the following cell and you'll see the changes immediately detected by the streaming query you've written.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Load Static Lookup Table
# MAGIC The ACID guarantees that Delta Lake brings to your data are managed at the table level, ensuring that only fully successful commits are reflected in your tables. If you choose to merge these data with other data sources, be aware of how those sources version data and what sort of consistency guarantees they have.
# MAGIC
# MAGIC In this simplified demo, we are loading a static CSV file to add patient data to our recordings. In production, we could use Databricks' <a href="https://docs.databricks.com/spark/latest/structured-streaming/auto-loader.html" target="_blank">Auto Loader</a> feature to keep an up-to-date view of these data in our Delta Lake.
# COMMAND ----------
(spark
.read
.format("csv")
.schema("mrn STRING, name STRING")
.option("header", True)
.load(f"{dataSource}/patient/patient_info.csv")
.createOrReplaceTempView("pii"))
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM pii
# COMMAND ----------
# MAGIC %md
# MAGIC ## Silver Table: Enriched Recording Data
# MAGIC As a second hop in our pipeline, our silver level will apply the following enrichments and checks:
# MAGIC - Our recordings data will be joined with the PII to add patient names
# MAGIC - The time for our recordings will be parsed to the format `'yyyy-MM-dd HH:mm:ss'` to be human-readable
# MAGIC - We will exclude heart rates that are <= 0, as we know that these either represent the absence of the patient or an error in transmission
# COMMAND ----------
(spark.readStream
.table("bronze")
.createOrReplaceTempView("bronze_tmp"))
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMPORARY VIEW recordings_w_pii AS (
# MAGIC SELECT device_id, a.mrn, b.name, cast(from_unixtime(time, 'yyyy-MM-dd HH:mm:ss') AS timestamp) time, heartrate
# MAGIC FROM bronze_tmp a
# MAGIC INNER JOIN pii b
# MAGIC ON a.mrn = b.mrn
# MAGIC WHERE heartrate > 0)
# COMMAND ----------
(spark.table("recordings_w_pii")
.writeStream
.format("delta")
.option("checkpointLocation", recordingsEnrichedCheckpoint)
.outputMode("append")
.table("recordings_enriched"))
# COMMAND ----------
# MAGIC %md
# MAGIC Trigger the arrival of another new file and wait for it to propagate through both of the previous queries.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT COUNT(*) FROM recordings_enriched
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Gold Table: Daily Averages
# MAGIC
# MAGIC Here we read a stream of data from the `recordings_enriched` table and write another stream to create an aggregate gold table of daily averages for each patient.
# COMMAND ----------
(spark.readStream
.table("recordings_enriched")
.createOrReplaceTempView("recordings_enriched_temp"))
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMP VIEW patient_avg AS (
# MAGIC SELECT mrn, name, mean(heartrate) avg_heartrate, date_trunc("DD", time) date
# MAGIC FROM recordings_enriched_temp
# MAGIC GROUP BY mrn, name, date_trunc("DD", time))
# COMMAND ----------
# MAGIC %md
# MAGIC Note that we're using `.trigger(once=True)` below. This gives us the ability to continue using the strengths of Structured Streaming while triggering this job as a single batch. To recap, these strengths include:
# MAGIC - exactly-once, end-to-end fault-tolerant processing
# MAGIC - automatic detection of changes in upstream data sources
# MAGIC
# MAGIC If we know the approximate rate at which our data grows, we can appropriately size the cluster we schedule for this job to ensure fast, cost-effective processing. The customer will be able to evaluate how much updating this final aggregate view of their data costs and make informed decisions about how frequently this operation needs to be run.
# MAGIC
# MAGIC Downstream processes subscribing to this table do not need to re-run any expensive aggregations. Rather, files just need to be de-serialized and then queries based on included fields can quickly be pushed down against this already-aggregated source.
# COMMAND ----------
(spark.table("patient_avg")
.writeStream
.format("delta")
.outputMode("complete")
.option("checkpointLocation", dailyAvgCheckpoint)
.trigger(once=True)
.table("daily_patient_avg")
)
# COMMAND ----------
# MAGIC %md
# MAGIC #### Important Considerations for `complete` Output with Delta
# MAGIC
# MAGIC When using `complete` output mode, we rewrite the entire state of our table each time our logic runs. While this is ideal for calculating aggregates, we **cannot** read a stream from this directory, as Structured Streaming assumes data is only being appended in the upstream logic.
# MAGIC
# MAGIC **NOTE**: Certain options can be set to change this behavior, but have other limitations attached. For more details, refer to [Delta Streaming: Ignoring Updates and Deletes](https://docs.databricks.com/delta/delta-streaming.html#ignoring-updates-and-deletes).
# MAGIC
# MAGIC The gold Delta table we have just registered will perform a static read of the current state of the data each time we run the following query.
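# COMMAND ----------
# MAGIC %md
# MAGIC For reference only: if a downstream consumer did need to stream from a table that is rewritten like this, Delta's `ignoreChanges` read option (described in the documentation linked above) relaxes the append-only assumption at the cost of re-emitting rewritten records. The cell below is a sketch; it only defines a streaming DataFrame and does not start a query.
# COMMAND ----------
# Sketch: opting in to receive rewritten records when streaming from a non-append-only Delta table.
# Downstream logic would need to tolerate duplicated/updated rows.
goldStreamDF = (spark.readStream
                .option("ignoreChanges", "true")
                .table("daily_patient_avg"))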
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM daily_patient_avg
# COMMAND ----------
# MAGIC %md
# MAGIC Note the above table includes all days for all users. If the predicates for our ad hoc queries match the data encoded here, we can push down our predicates to files at the source and very quickly generate more limited aggregate views.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT *
# MAGIC FROM daily_patient_avg
# MAGIC WHERE date BETWEEN "2020-01-17" AND "2020-01-31"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Process Remaining Records
# MAGIC The following cell will land additional files for the rest of 2020 in your source directory. You'll be able to watch these records propagate through the first 3 tables in your Delta Lake, but you will need to re-run your final query to update your `daily_patient_avg` table, since that query uses the trigger-once syntax.
# COMMAND ----------
File.newData(continuous=True)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Wrapping Up
# MAGIC
# MAGIC Finally, make sure all streams are stopped.
# COMMAND ----------
# MAGIC %run "../Includes/multi-hop-setup" $mode="clean"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Summary
# MAGIC
# MAGIC Delta Lake and Structured Streaming combine to provide near real-time analytic access to data in the lakehouse.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Additional Topics & Resources
# MAGIC
# MAGIC * <a href="https://docs.databricks.com/delta/delta-streaming.html" target="_blank">Table Streaming Reads and Writes</a>
# MAGIC * <a href="https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html" target="_blank">Structured Streaming Programming Guide</a>
# MAGIC * <a href="https://www.youtube.com/watch?v=rl8dIzTpxrI" target="_blank">A Deep Dive into Structured Streaming</a> by Tathagata Das. This is an excellent video describing how Structured Streaming works.
# MAGIC * <a href="https://databricks.com/glossary/lambda-architecture" target="_blank">Lambda Architecture</a>
# MAGIC * <a href="https://bennyaustin.wordpress.com/2010/05/02/kimball-and-inmon-dw-models/#" target="_blank">Data Warehouse Models</a>
# MAGIC * <a href="http://spark.apache.org/docs/latest/structured-streaming-kafka-integration.html" target="_blank">Create a Kafka Source Stream</a>
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 40.638806 | 432 | 0.726627 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Using the Delta Live Tables UI
# MAGIC
# MAGIC This demo will explore the DLT UI. By the end of this lesson you will be able to:
# MAGIC
# MAGIC * Deploy a DLT pipeline
# MAGIC * Explore the resultant DAG
# MAGIC * Execute an update of the pipeline
# MAGIC * Look at metrics
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run Setup
# MAGIC
# MAGIC The following cell is configured to reset this demo.
# COMMAND ----------
# MAGIC %run ../Includes/dlt-setup $course="dlt_demo" $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC Execute the following cell to print out two values that will be used during the following configuration steps.
# COMMAND ----------
print(f"Target: {database}")
print(f"Storage location: {userhome.split(':')[1]}")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create and configure a pipeline
# MAGIC
# MAGIC In this section you will create a pipeline using a notebook provided with the courseware. We'll explore the contents of the notebook in the following lesson.
# MAGIC
# MAGIC 1. Click the **Jobs** button on the sidebar.
# MAGIC 1. Select the **Delta Live Tables** tab.
# MAGIC 1. Click **Create Pipeline**.
# MAGIC 1. Fill in a **Pipeline Name** of your choosing.
# MAGIC 1. For **Notebook Libraries**, use the navigator to locate and select the companion notebook called **3.3.2 - SQL for Delta Live Tables**.
# MAGIC * Though this document is a standard Databricks Notebook, the SQL syntax is specialized to DLT table declarations. We will be exploring the syntax in the exercise that follows.
# MAGIC 1. In the **Target** field, specify the database name printed out next to **Target** in the cell above. (This should follow the pattern `dbacademy_<username>_dlt_demo`)
# MAGIC * This field is optional; if not specified, then tables will not be registered to a metastore, but will still be available in the DBFS. Refer to the <a href="https://docs.databricks.com/data-engineering/delta-live-tables/delta-live-tables-user-guide.html#publish-tables" target="_blank">documentation</a> for more information on this option.
# MAGIC 1. In the **Storage location** field, copy the directory as printed above.
# MAGIC * This optional field allows the user to specify a location to store logs, tables, and other information related to pipeline execution. If not specified, DLT will automatically generate a directory.
# MAGIC 1. For **Pipeline Mode**, select **Triggered**
# MAGIC * This field specifies how the pipeline will be run. **Triggered** pipelines run once and then shut down until the next manual or scheduled update. **Continuous** pipelines run continuously, ingesting new data as it arrives. Choose the mode based on latency and cost requirements.
# MAGIC 1. Uncheck the **Enable autoscaling** box, and set the number of workers to 1.
# MAGIC * **Enable autoscaling**, **Min Workers** and **Max Workers** control the worker configuration for the underlying cluster processing the pipeline. Notice the DBU estimate provided, similar to that provided when configuring interactive clusters.
# MAGIC 1. Click **Create**.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run a pipeline
# MAGIC
# MAGIC With a pipeline created, you will now run the pipeline.
# MAGIC
# MAGIC 1. Select **Development** to run the pipeline in development mode. Development mode provides for more expeditious iterative development by reusing the cluster (as opposed to creating a new cluster for each run) and disabling retries so that you can readily identify and fix errors. Refer to the <a href="https://docs.databricks.com/data-engineering/delta-live-tables/delta-live-tables-user-guide.html#optimize-execution" target="_blank">documentation</a> for more information on this feature.
# MAGIC 2. Click **Start**.
# MAGIC
# MAGIC The initial run will take several minutes while a cluster is provisioned. Subsequent runs will be appreciably quicker.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Exploring the DAG
# MAGIC
# MAGIC As the pipeline completes, the execution flow is graphed. Select the tables to review their details.
# MAGIC
# MAGIC Select **sales_orders_cleaned**. Notice the results reported in the **Data Quality** section. Because this flow has data expectations declared, those metrics are tracked here. No records are dropped because the constraint is declared in a way that allows violating records to be included in the output. This will be covered in more detail in the next exercise.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 56.163043 | 500 | 0.732408 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md
# MAGIC # Exploring the Results of a DLT Pipeline
# MAGIC
# MAGIC This Notebook explores the execution results of a DLT pipeline.
# COMMAND ----------
# MAGIC %run ../Includes/dlt-setup $course="dlt_demo"
# COMMAND ----------
storage_location = userhome
# COMMAND ----------
dbutils.fs.ls(storage_location)
# COMMAND ----------
# MAGIC %md
# MAGIC The `system` directory captures events associated with the pipeline.
# COMMAND ----------
dbutils.fs.ls(f"{storage_location}/system/events")
# COMMAND ----------
# MAGIC %md
# MAGIC These event logs are stored as a Delta table. Let's query the table.
# COMMAND ----------
display(spark.sql(f"SELECT * FROM delta.`{storage_location}/system/events`"))
# COMMAND ----------
# MAGIC %md
# MAGIC Let's view the contents of the *tables* directory.
# COMMAND ----------
dbutils.fs.ls(f"{storage_location}/tables")
# COMMAND ----------
# MAGIC %md
# MAGIC Let's query the gold table.
# COMMAND ----------
display(spark.sql(f"SELECT * FROM {database}.sales_order_in_la"))
| 18.581818 | 77 | 0.642193 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Using Auto Loader and Structured Streaming with Spark SQL
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lab, you will be able to:
# MAGIC * Ingest data using Auto Loader
# MAGIC * Aggregate streaming data
# MAGIC * Stream data to a Delta table
# COMMAND ----------
# MAGIC %md
# MAGIC ## Setup
# MAGIC Run the following script to set up necessary variables and clear out past runs of this notebook. Note that re-executing this cell will allow you to start the lab over.
# COMMAND ----------
# MAGIC %run ../../Includes/classic-setup $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Configure Streaming Read
# MAGIC
# MAGIC This lab uses a collection of customer-related CSV data from DBFS found in */databricks-datasets/retail-org/customers/*.
# MAGIC
# MAGIC Read this data with Auto Loader, using its schema inference (use `customersCheckpointPath` to store the schema info). Create a streaming temporary view called `customers_raw_temp`.
# COMMAND ----------
# ANSWER
customersCheckpointPath = userhome + "/customersCheckpoint"
(spark
.readStream
.format("cloudFiles")
.option("cloudFiles.format", "csv")
.option("cloudFiles.schemaLocation", customersCheckpointPath)
.load("/databricks-datasets/retail-org/customers/")
.createOrReplaceTempView("customers_raw_temp")
)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Define a streaming aggregation
# MAGIC
# MAGIC Using CTAS syntax, define a new streaming view called `customer_count_by_state_temp` that counts the number of customers per `state`, in a field called `customer_count`.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- ANSWER
# MAGIC CREATE OR REPLACE TEMPORARY VIEW customer_count_by_state_temp AS
# MAGIC SELECT
# MAGIC state,
# MAGIC count(state) AS customer_count
# MAGIC FROM customers_raw_temp
# MAGIC GROUP BY
# MAGIC state
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Write aggregated data to a Delta table
# MAGIC
# MAGIC Stream data from the `customer_count_by_state_temp` view to a Delta table called `customer_count_by_state`.
# COMMAND ----------
# ANSWER
customersCountCheckpointPath = userhome + "/customersCountCheckpoint"
(spark
.table("customer_count_by_state_temp")
.writeStream
.format("delta")
.option("checkpointLocation", customersCountCheckpointPath)
.outputMode("complete")
.table("customer_count_by_state"))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Query the results
# MAGIC
# MAGIC Query the `customer_count_by_state` table (this will not be a streaming query). Plot the results as a bar graph and also as a map plot.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- ANSWER
# MAGIC SELECT * FROM customer_count_by_state
# COMMAND ----------
# MAGIC %md
# MAGIC ## Wrapping Up
# MAGIC
# MAGIC Run the following cell to remove the database and all data associated with this lab.
# COMMAND ----------
# MAGIC %run ../../Includes/classic-setup $mode="clean"
# COMMAND ----------
# MAGIC %md
# MAGIC By completing this lab, you should now feel comfortable:
# MAGIC * Using PySpark to configure Auto Loader for incremental data ingestion
# MAGIC * Using Spark SQL to aggregate streaming data
# MAGIC * Streaming data to a Delta table
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 29.223881 | 192 | 0.700914 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Propagating Incremental Updates with Structured Streaming and Delta Lake
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lab, you will be able to:
# MAGIC * Apply your knowledge of structured streaming and Auto Loader to implement a simple multi-hop architecture
# COMMAND ----------
# MAGIC %md
# MAGIC ## Setup
# MAGIC Run the following script to set up necessary variables and clear out past runs of this notebook. Note that re-executing this cell will allow you to start the lab over.
# COMMAND ----------
# MAGIC %run ../../Includes/classic-setup $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Ingest data
# MAGIC
# MAGIC This lab uses a collection of customer-related CSV data from DBFS found in */databricks-datasets/retail-org/customers/*.
# MAGIC
# MAGIC Read this data with Auto Loader, using its schema inference (use `customersCheckpointPath` to store the schema info). Stream the raw data to a Delta table called `bronze`.
# COMMAND ----------
# ANSWER
customersCheckpointPath = userhome + "/customersCheckpoint"
(spark
.readStream
.format("cloudFiles")
.option("cloudFiles.format", "csv")
.option("cloudFiles.schemaLocation", customersCheckpointPath)
.load("/databricks-datasets/retail-org/customers/")
.writeStream
.format("delta")
.option("checkpointLocation", customersCheckpointPath)
.outputMode("append")
.table("bronze"))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Let's create a streaming temporary view into the bronze table, so that we can perform transforms using SQL.
# COMMAND ----------
(spark
.readStream
.table("bronze")
.createOrReplaceTempView("bronze_temp"))
# COMMAND ----------
# MAGIC %md
# MAGIC ## Clean and enhance data
# MAGIC
# MAGIC Using CTAS syntax, define a new streaming view called `bronze_enhanced_temp` that does the following:
# MAGIC * Skips records with a null `postcode` (these are set to zero in the data)
# MAGIC * Inserts a column called `receipt_time` containing a current timestamp
# MAGIC * Inserts a column called `source_file` containing the input filename
# COMMAND ----------
# MAGIC %sql
# MAGIC -- ANSWER
# MAGIC CREATE OR REPLACE TEMPORARY VIEW bronze_enhanced_temp AS
# MAGIC SELECT
# MAGIC *, current_timestamp() receipt_time, input_file_name() source_file
# MAGIC FROM bronze_temp
# MAGIC WHERE postcode > 0
# COMMAND ----------
# MAGIC %md
# MAGIC ## Silver table
# MAGIC
# MAGIC Stream the data from `bronze_enhanced_temp` to a table called `silver`.
# COMMAND ----------
# ANSWER
silverCheckpointPath = userhome + "/silverCheckpoint"
(spark.table("bronze_enhanced_temp")
.writeStream
.format("delta")
.option("checkpointLocation", silverCheckpointPath)
.outputMode("append")
.table("silver"))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Let's create a streaming temporary view into the silver table, so that we can perform business-level aggregations using SQL.
# COMMAND ----------
(spark
.readStream
.table("silver")
.createOrReplaceTempView("silver_temp"))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Gold tables
# MAGIC
# MAGIC Using CTAS syntax, define a new streaming view called `customer_count_by_state_temp` that counts customers per state.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- ANSWER
# MAGIC CREATE OR REPLACE TEMPORARY VIEW customer_count_by_state_temp AS
# MAGIC SELECT state, count(state) AS customer_count
# MAGIC FROM silver_temp
# MAGIC GROUP BY
# MAGIC state
# COMMAND ----------
# MAGIC %md
# MAGIC Finally, stream the data from the `customer_count_by_state_temp` view to a Delta table called `gold_customer_count_by_state`.
# COMMAND ----------
# ANSWER
customersCountCheckpointPath = userhome + "/customersCountCheckpoint"
(spark
.table("customer_count_by_state_temp")
.writeStream
.format("delta")
.option("checkpointLocation", customersCountCheckpointPath)
.outputMode("complete")
.table("gold_customer_count_by_state"))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Query the results
# MAGIC
# MAGIC Query the `gold_customer_count_by_state` table (this will not be a streaming query). Plot the results as a bar graph and also as a map plot.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM gold_customer_count_by_state
# COMMAND ----------
# MAGIC %md
# MAGIC ## Wrapping Up
# MAGIC
# MAGIC Run the following cell to remove the database and all data associated with this lab.
# COMMAND ----------
# MAGIC %run ../../Includes/classic-setup $mode="clean"
# COMMAND ----------
# MAGIC %md
# MAGIC By completing this lab, you should now feel comfortable:
# MAGIC * Using PySpark to configure Auto Loader for incremental data ingestion
# MAGIC * Using Spark SQL to aggregate streaming data
# MAGIC * Streaming data to a Delta table
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 27.747423 | 192 | 0.698888 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Lab: Migrating SQL Notebooks to Delta Live Tables
# MAGIC
# MAGIC This notebook describes the overall structure for the lab exercise, configures the environment for the lab, provides simulated data streaming, and performs cleanup once you are done. A notebook like this is not typically needed in a production pipeline scenario.
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Convert existing data pipelines to Delta Live Tables
# COMMAND ----------
# MAGIC %md
# MAGIC ## Datasets Used
# MAGIC
# MAGIC This demo uses simplified artificially generated medical data. The schema of our two datasets is represented below. Note that we will be manipulating these schema during various steps.
# MAGIC
# MAGIC #### Recordings
# MAGIC The main dataset uses heart rate recordings from medical devices delivered in the JSON format.
# MAGIC
# MAGIC | Field | Type |
# MAGIC | --- | --- |
# MAGIC | device_id | int |
# MAGIC | mrn | long |
# MAGIC | time | double |
# MAGIC | heartrate | double |
# MAGIC
# MAGIC #### PII
# MAGIC These data will later be joined with a static table of patient information stored in an external system to identify patients by name.
# MAGIC
# MAGIC | Field | Type |
# MAGIC | --- | --- |
# MAGIC | mrn | long |
# MAGIC | name | string |
# COMMAND ----------
# MAGIC %md
# MAGIC ## Getting Started
# MAGIC
# MAGIC Begin by running the following cell to configure the lab environment.
# COMMAND ----------
# MAGIC %run "../../Includes/dlt-setup" $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Land Initial Data
# MAGIC Seed the landing zone with some data before proceeding. You will re-run this command to land additional data later.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create and Configure a Pipeline
# MAGIC
# MAGIC 1. Click the **Jobs** button on the sidebar, then select the **Delta Live Tables** tab.
# MAGIC 1. Click **Create Pipeline**.
# MAGIC 1. Fill in a **Pipeline Name** of your choosing.
# MAGIC 1. For **Notebook Libraries**, use the navigator to locate and select the notebook `3.3.4 - LAB - Migrating a SQL Pipeline to DLT`.
# MAGIC 1. Run the cell below to generate values for **source**, **Target** and **Storage Location**. (All of these will include your current username).
# MAGIC * Click `Add configuration`; enter the word `source` in the **Key** field and the output printed next to `source` below in the value field.
# MAGIC * Enter the database name printed next to `Target` below in the **Target** field.
# MAGIC * Enter the location printed next to `Storage Location` below in the **Storage Location** field.
# MAGIC 1. Set **Pipeline Mode** to **Triggered**.
# MAGIC 1. Disable autoscaling.
# MAGIC 1. Set the number of workers to 1.
# MAGIC 1. Click **Create**.
# COMMAND ----------
storage_location = userhome + "/output"
print(f"source : {dataLandingLocation.split(':')[1]}")
print(f"Target: {database}")
print(f"Storage Location: {storage_location.split(':')[1]}")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run your Pipeline
# MAGIC
# MAGIC Select **Development** mode, which accelerates the development lifecycle by reusing the same cluster across runs. It will also turn off automatic retries when jobs fail.
# MAGIC
# MAGIC Click **Start** to begin the first update to your table.
# MAGIC
# MAGIC Delta Live Tables will automatically deploy all the necessary infrastructure and resolve the dependencies between all datasets.
# MAGIC
# MAGIC **NOTE**: The first table update may take several minutes as relationships are resolved and infrastructure deploys.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Open and Complete DLT Pipeline Notebook
# MAGIC
# MAGIC You will perform your work in this <a href="$./3.3.4 - LAB - Migrating a SQL Pipeline to DLT" target="_blank">companion Notebook</a>, which you will ultimately deploy as a pipeline.
# MAGIC
# MAGIC Open the Notebook and, following the guidelines provided therein, fill in the cells where prompted to implement a multi-hop architecture similar to the one we worked with in the previous section.
# MAGIC
# MAGIC **NOTE**: As a first step in preparing your pipeline, use the **source** value printed by the configuration cell above to obtain the cloud files location. Substitute this value for the text that reads `<CLOUD_FILES LOCATION>`. This value is unique to your user identity within the workspace, to prevent possible interference between users in the same workspace.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Troubleshooting Code in Development Mode
# MAGIC
# MAGIC Don't despair if your pipeline fails the first time. Delta Live Tables is in active development, and error messages are improving all the time.
# MAGIC
# MAGIC Because relationships between tables are mapped as a DAG, error messages will often indicate that a dataset isn't found.
# MAGIC
# MAGIC Let's consider our DAG below:
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/dlt_dag.png" width="400">
# MAGIC
# MAGIC If the error message `Dataset not found: 'recordings_parsed'` is raised, there may be several culprits:
# MAGIC 1. The logic defining `recordings_parsed` is invalid
# MAGIC 1. There is an error reading from `recordings_bronze`
# MAGIC 1. A typo exists in either `recordings_parsed` or `recordings_bronze`
# MAGIC
# MAGIC The safest way to identify the culprit is to iteratively add table/view definitions back into your DAG starting from your initial ingestion tables. You can simply comment out later table/view definitions and uncomment these between runs.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Display Results
# MAGIC
# MAGIC Assuming your pipeline runs successfully, display the contents of the gold table.
# MAGIC
# MAGIC **NOTE**: Because we specified a value for **Target**, tables are published to the specified database. Without a **Target** specification, we would need to query the table based on its underlying location in DBFS (relative to the **Storage Location**).
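# MAGIC
# MAGIC For reference, a path-based query would look roughly like the following; the assumption here is that DLT writes published tables under the `tables` directory of the **Storage Location** (substitute your own storage path):
# MAGIC
# MAGIC ```sql
# MAGIC SELECT * FROM delta.`<storage_location>/tables/daily_patient_avg`
# MAGIC ```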
# COMMAND ----------
spark.sql(f"SELECT * FROM {database}daily_patient_avg")
# COMMAND ----------
# MAGIC %md
# MAGIC Trigger another file arrival with the following cell. Feel free to run it a couple more times if desired. Following this, run the pipeline again and view the results. Feel free to re-run the cell above to gain an updated view of the `daily_patient_avg` table.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Wrapping Up
# MAGIC
# MAGIC Ensure that you delete your pipeline from the DLT UI, and run the following cell to clean up the files and tables that were created as part of the lab setup and execution.
# COMMAND ----------
# MAGIC %run "../../Includes/dlt-setup" $mode="clean"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Summary
# MAGIC
# MAGIC In this lab, you learned to convert an existing data pipeline to a Delta Live Tables SQL pipeline, and deployed that pipeline using the DLT UI.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Additional Topics & Resources
# MAGIC
# MAGIC * <a href="https://docs.databricks.com/data-engineering/delta-live-tables/index.html" target="_blank">Delta Live Tables Documentation</a>
# MAGIC * <a href="https://youtu.be/6Q8qPZ7c1O0" target="_blank">Delta Live Tables Demo</a>
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 42.010582 | 333 | 0.71469 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Orchestrating Jobs with Databricks
# MAGIC
# MAGIC New updates to the Databricks Jobs UI have added the ability to schedule multiple tasks as part of a job, allowing Databricks Jobs to fully handle orchestration for most production workloads.
# MAGIC
# MAGIC Here, we'll start by reviewing the steps for scheduling a notebook as a triggered standalone job, and then add a dependent job using a DLT pipeline.
# MAGIC
# MAGIC
# MAGIC By the end of this lesson, you should feel confident:
# MAGIC * Scheduling a notebook as a Databricks Job
# MAGIC * Describing job scheduling options and differences between cluster types
# MAGIC * Reviewing Job Runs to track progress and see results
# MAGIC * Scheduling a DLT pipeline as a Databricks Job
# MAGIC * Configuring linear dependencies between tasks using the Databricks Jobs UI
# MAGIC
# MAGIC ## Schedule a Notebook Job
# MAGIC
# MAGIC When using the Jobs UI to orchestrate a workload with multiple tasks, you'll always begin by scheduling a single task.
# MAGIC
# MAGIC Here, we'll start by scheduling the notebook `1 - Reset`.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Navigate to the Jobs UI using the Databricks left side navigation bar.
# MAGIC 1. Click the blue `Create Job` button
# MAGIC 1. Configure the task:
# MAGIC 1. Enter `reset` for the task name
# MAGIC 1. Select the notebook `1 - Reset` using the notebook picker
# MAGIC 1. Select an Existing All Purpose Cluster from the **Cluster** dropdown
# MAGIC 1. Click **Create**
# MAGIC
# MAGIC **Note**: When selecting your all purpose cluster, you will get a warning about how this will be billed as all purpose compute. Production jobs should always be scheduled against new job clusters appropriately sized for the workload, as this is billed at a much lower rate.
# MAGIC
# MAGIC Click the blue **Run now** button in the top right to start the job.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Review Run
# MAGIC
# MAGIC As currently scheduled, our single notebook provides identical performance to the legacy Databricks Jobs UI, which only allowed a single notebook to be scheduled.
# MAGIC
# MAGIC From the **Runs** tab, clicking on the start time field will display a preview of the notebook with results. If the job is still running, this will be under **Active Runs**, and the displayed notebook will occasionally update to show progress throughout execution. If it has already completed, it will be under **Completed Runs** and just display the static results of the executed notebook.
# MAGIC
# MAGIC The notebook we scheduled uses the magic command `%run` to call an additional notebook with a relative path. Note that while not covered in this course, [new functionality added to Databricks Repos allows loading Python modules using relative paths](https://docs.databricks.com/repos.html#work-with-non-notebook-files-in-a-databricks-repo).
# MAGIC
# MAGIC The actual outcome of the scheduled notebook is to reset the output of the DLT pipeline configured earlier in the course, as well as to print out the necessary variables used to configure this pipeline for users that may not have coded along previously.
# MAGIC
# MAGIC Before continuing to the next step, make sure you either have access to a DLT pipeline configured in a previous lesson or use the instructions included in the run output above to configure one now.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Cron Scheduling of Databricks Jobs
# MAGIC
# MAGIC Note that on the right hand side of the Jobs UI, directly under the **Job Details** section is a section labeled **Schedule**.
# MAGIC
# MAGIC Click on the **Edit schedule** button to explore scheduling options.
# MAGIC
# MAGIC Changing the **Schedule type** field from **Manual** to **Scheduled** will bring up a cron scheduling UI.
# MAGIC
# MAGIC This UI provides extensive options for setting up chronological scheduling of your Jobs. Settings configured with the UI can also be output in cron syntax, which can be edited if you need custom configuration not available in the UI.
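# MAGIC
# MAGIC For example, Databricks Jobs use Quartz cron syntax, whose six fields are seconds, minutes, hours, day-of-month, month, and day-of-week; a job scheduled to run daily at 02:00 would use an expression like:
# MAGIC
# MAGIC ```
# MAGIC 0 0 2 * * ?
# MAGIC ```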
# MAGIC
# MAGIC At this time, we'll leave our job set with **Manual** scheduling.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Schedule a DLT Pipeline as a Task
# MAGIC
# MAGIC In this step, we'll add a DLT pipeline to execute after the success of the task we configured in the previous step.
# MAGIC
# MAGIC **NOTE**: This step assumes that the DLT pipeline described in the lab for module 3 of this course was configured successfully. If this is not the case, instructions are included for configuring this DLT pipeline in the run output of the `reset` notebook executed above.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Navigate to the Jobs UI using the Databricks left side navigation bar
# MAGIC 1. Select the job you defined above by clicking on the name
# MAGIC 1. At the top left of your screen, you'll see the **Runs** tab is currently selected; click the **Tasks** tab.
# MAGIC 1. Click the large blue circle with a **+** at the center bottom of the screen to add a new task
# MAGIC 1. Specify the **Task name** as `dlt`
# MAGIC 1. From **Type**, select `Pipeline`
# MAGIC 1. Click the **Pipeline** field and select the DLT pipeline you configured previously
# MAGIC 1. Note that the **Depends on** field defaults to your previously defined task
# MAGIC 1. Click the blue **Create task** button
# MAGIC
# MAGIC You should now see a screen with 2 boxes and a downward arrow between them. Your `reset` task will be at the top, leading into your `dlt` task. This visualization represents the dependencies between these tasks.
# MAGIC
# MAGIC Click **Run now** to execute your job.
# MAGIC
# MAGIC **NOTE**: You may need to wait a few minutes as infrastructure for your DLT pipeline is deployed.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Review Multi-Task Run Results
# MAGIC
# MAGIC Clicking into the job run will replicate the UI showing both tasks. The visualizations for tasks will update in real time to reflect which tasks are actively running, and will change colors if task failures occur. Clicking on a task box will render the scheduled notebook in the UI. (You can think of this as just an additional layer of orchestration on top of the previous Databricks Jobs UI, if that helps; note that if you have workloads scheduling jobs with the CLI or REST API, [the JSON structure used to configure and get results about jobs has seen similar updates to the UI](https://docs.databricks.com/dev-tools/api/latest/jobs.html)).
# MAGIC
# MAGIC **NOTE**: At this time, DLT pipelines scheduled as tasks do not directly render results in the Runs GUI; instead, you will be directed back to the DLT Pipeline GUI for the scheduled Pipeline.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 62.801724 | 652 | 0.746757 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %run ../../Includes/dlt-setup $course="dlt_demo" $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC If you have not previously configured this DLT pipeline successfully, the following cell prints out two values that will be used during the configuration steps that follow.
# COMMAND ----------
print(f"Target: {database}")
print(f"Storage location: {userhome.split(':')[1]}")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create and configure a pipeline
# MAGIC
# MAGIC The instructions below refer to the same pipeline created during the previous codealong for DLT; if you successfully configured this notebook previously, you should not need to reconfigure this pipeline now.
# MAGIC
# MAGIC
# MAGIC Steps:
# MAGIC 1. Click the **Jobs** button on the sidebar.
# MAGIC 1. Select the **Delta Live Tables** tab.
# MAGIC 1. Click **Create Pipeline**.
# MAGIC 1. Fill in a **Pipeline Name** of your choosing.
# MAGIC 1. For **Notebook Libraries**, use the navigator to locate and select the companion notebook called **2 - DLT Job**.
# MAGIC 1. In the **Target** field, specify the database name printed out next to **Target** in the cell above. (This should follow the pattern `dbacademy_<username>_dlt_demo`)
# MAGIC 1. In the **Storage location** field, copy the directory as printed above.
# MAGIC 1. For **Pipeline Mode**, select **Triggered**
# MAGIC 1. Uncheck the **Enable autoscaling** box, and set the number of workers to 1.
# MAGIC 1. Click **Create**.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 45.204082 | 215 | 0.70084 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Orchestrating Jobs with Databricks
# MAGIC
# MAGIC In this lab, you'll be configuring a multi-task job consisting of:
# MAGIC * A notebook that lands a new batch of data in a storage directory
# MAGIC * A Delta Live Table pipeline that processes this data through a series of tables
# MAGIC * A notebook that queries the gold table produced by this pipeline as well as various metrics output by DLT
# MAGIC
# MAGIC By the end of this lab, you should feel confident:
# MAGIC * Scheduling a notebook as a Databricks Job
# MAGIC * Scheduling a DLT pipeline as a Databricks Job
# MAGIC * Configuring linear dependencies between tasks using the Databricks Jobs UI
# MAGIC
# MAGIC ## Schedule a Notebook Job
# MAGIC
# MAGIC When using the Jobs UI to orchestrate a workload with multiple tasks, you'll always begin by scheduling a single task.
# MAGIC
# MAGIC Here, we'll start by scheduling the notebook `1 - Batch Job`.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Navigate to the Jobs UI using the Databricks left side navigation bar.
# MAGIC 1. Click the blue `Create Job` button
# MAGIC 1. Configure the task:
# MAGIC 1. Enter `Batch-Job` for the task name
# MAGIC 1. Select the notebook `1 - Batch Job` using the notebook picker
# MAGIC 1. Select an Existing All Purpose Cluster from the **Cluster** dropdown
# MAGIC 1. Click **Create**
# MAGIC
# MAGIC **Note**: When selecting your all purpose cluster, you will get a warning about how this will be billed as all purpose compute. Production jobs should always be scheduled against new job clusters appropriately sized for the workload, as this is billed at a much lower rate.
# MAGIC
# MAGIC Click the blue **Run now** button in the top right to confirm that you have successfully configured this task. From the **Runs** tab, clicking on the start time field will pull up the notebook with results.
# MAGIC
# MAGIC ## Schedule a DLT Pipeline as a Task
# MAGIC
# MAGIC In this step, we'll add a DLT pipeline to execute after the success of the task we configured in the previous step.
# MAGIC
# MAGIC **NOTE**: This step assumes that the DLT pipeline described in the lab for module 3 of this course was configured successfully. If this is not the case, instructions are included for configuring this DLT pipeline in the run output of the `Batch-Job` notebook executed above.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Navigate to the Jobs UI using the Databricks left side navigation bar
# MAGIC 1. Select the job you defined above by clicking on the name (this should have the name `Batch-Job`)
# MAGIC 1. At the top left of your screen, you'll see the **Runs** tab is currently selected; click the **Tasks** tab.
# MAGIC 1. Click the large blue circle with a **+** at the center bottom of the screen to add a new task
# MAGIC 1. Specify the **Task name** as `DLT-Pipeline`
# MAGIC 1. From **Type**, select `Pipeline`
# MAGIC 1. Click the **Pipeline** field and select the DLT pipeline you configured previously
# MAGIC 1. Note that the **Depends on** field defaults to your previously defined task
# MAGIC 1. Click the blue **Create task** button
# MAGIC
# MAGIC You should now see a screen with 2 boxes and a downward arrow between them. Your `Batch-Job` task will be at the top, leading into your `DLT-Pipeline` task. This visualization represents the dependencies between these tasks.
# MAGIC
# MAGIC Before clicking **Run now**, click the job name in the top left and provide something unique and descriptive, like `<your_initials>-MTJ-lab`
# MAGIC
# MAGIC **NOTE**: You may need to wait a few minutes as infrastructure for your DLT pipeline is deployed. Feel free to skip clicking **Run now** until the next task is configured if you don't want to wait.
# MAGIC
# MAGIC ## Schedule an Additional Notebook Task
# MAGIC
# MAGIC An additional notebook has been provided which queries some of the DLT metrics and the gold table defined in the DLT pipeline. We'll add this as a final task in our job.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Navigate to the **Tasks** tab of the job you've been configuring
# MAGIC 1. Click the blue **+** button to add another task
# MAGIC 1. Specify the **Task name** as `Query-Results`
# MAGIC 1. Leave the **Type** set to `Notebook`
# MAGIC 1. Select the notebook `3 - Query Results Job` using the notebook picker
# MAGIC 1. Note that the **Depends on** field defaults to your previously defined task
# MAGIC 1. Select an Existing All Purpose Cluster from the **Cluster** dropdown
# MAGIC 1. Click the blue **Create task** button
# MAGIC
# MAGIC Click the blue **Run now** button in the top right of the screen to run this job.
# MAGIC
# MAGIC From the **Runs** tab, you will be able to click on the start time for this run under the **Active runs** section and visually track task progress.
# MAGIC
# MAGIC Once all your tasks have succeeded, review the contents of each task to confirm expected behavior.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 61.5 | 281 | 0.732127 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ../../Includes/dlt-setup
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create and Configure a Pipeline
# MAGIC
# MAGIC **NOTE**: This lab is configured to work with the DLT pipeline completed as part of the DLT lab in the previous module. If you have not successfully completed that lab, follow the instructions below to configure a pipeline using the specified notebook.
# MAGIC
# MAGIC Instructions for configuring DLT pipeline:
# MAGIC 1. Click the **Jobs** button on the sidebar, then select the **Delta Live Tables** tab.
# MAGIC 1. Click **Create Pipeline**.
# MAGIC 1. Fill in a **Pipeline Name** of your choosing.
# MAGIC 1. For **Notebook Libraries**, use the navigator to locate and select the notebook `4.1.2 - DLT Job`.
# MAGIC 1. Run the cell below to generate values for **source**, **Target** and **Storage Location**. (All of these will include your current username).
# MAGIC * Click `Add configuration`; enter the word `source` in the **Key** field and the output printed next to `source` below in the value field.
# MAGIC * Enter the database name printed next to `Target` below in the **Target** field.
# MAGIC * Enter the location printed next to `Storage Location` below in the **Storage Location** field.
# MAGIC 1. Set **Pipeline Mode** to **Triggered**.
# MAGIC 1. Disable autoscaling.
# MAGIC 1. Set the number of workers to 1.
# MAGIC 1. Click **Create**.
# COMMAND ----------
storage_location = userhome + "/output"
print(f"source : {dataLandingLocation.split(':')[1]}")
print(f"Target: {database}")
print(f"Storage Location: {storage_location.split(':')[1]}")
| 45.638889 | 256 | 0.70143 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ../../Includes/dlt-setup
# COMMAND ----------
# MAGIC %md
# MAGIC # Exploring the Results of a DLT Pipeline
# MAGIC
# MAGIC This Notebook explores the execution results of a DLT pipeline. Before proceeding, you will need one piece of information specific to your pipeline instance: the location in DBFS where results are stored. Because we did not specify a value for **Storage Location** when creating the pipeline, DLT automatically created a folder for us. Obtain this information as follows.
# MAGIC
# MAGIC Click **Settings** on the **Pipeline Details** page. This provides a JSON representation of the pipeline configuration. Copy the value specified for **storage** and substitute for `<storage>` throughout the rest of this Notebook.
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/icon_hint_24.png"/> Generally, and particularly in production systems, you will specify **Storage Location** in your pipeline configurations to have full control of where pipeline results are stored.
# COMMAND ----------
storage_location = userhome + "/output"
# COMMAND ----------
dbutils.fs.ls(storage_location)
# COMMAND ----------
# MAGIC %md
# MAGIC The `system` directory captures events associated with the pipeline.
# COMMAND ----------
dbutils.fs.ls(f"{storage_location}/system/events")
# COMMAND ----------
# MAGIC %md
# MAGIC These event logs are stored as a Delta table. Let's query the table.
# COMMAND ----------
display(spark.sql(f"SELECT * FROM delta.`{storage_location}/system/events`"))
# COMMAND ----------
# MAGIC %md
# MAGIC Let's view the contents of the *tables* directory.
# COMMAND ----------
dbutils.fs.ls(f"{storage_location}/tables")
# COMMAND ----------
# MAGIC %md
# MAGIC Let's query the gold table.
# COMMAND ----------
display(spark.sql(f"SELECT * FROM {database}.daily_patient_avg"))
# COMMAND ----------
database
| 29.68254 | 379 | 0.700311 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Navigating Databricks SQL and Attaching to Endpoints
# MAGIC
# MAGIC * Navigate to Databricks SQL
# MAGIC * Make sure that SQL is selected from the workspace option in the sidebar (directly below the Databricks logo)
# MAGIC * Make sure a SQL endpoint is on and accessible
# MAGIC   * Navigate to SQL endpoints in the sidebar
# MAGIC * If a SQL endpoint exists and has the State `Running`, you'll use this endpoint
# MAGIC * If a SQL endpoint exists but is `Stopped`, click the `Start` button if you have this option (**NOTE**: Start the smallest endpoint you have available to you)
# MAGIC * If no endpoints exist and you have the option, click `Create SQL Endpoint`; name the endpoint something you'll recognize and set the cluster size to 2X-Small. Leave all other options as default.
# MAGIC * If you have no way to create or attach to a SQL endpoint, you'll need to contact a workspace administrator and request access to compute resources in Databricks SQL to continue.
# MAGIC * Navigate to home page in Databricks SQL
# MAGIC * Click the Databricks logo at the top of the side nav bar
# MAGIC * Locate the **Sample dashboards** and click `Visit gallery`
# MAGIC * Click `Import` next to the **Retail Revenue & Supply Chain** option
# MAGIC * Assuming you have a SQL endpoint available, this should load a dashboard and immediately display results
# MAGIC * Click **Refresh** in the top right (the underlying data has not changed, but this is the button that would be used to pick up changes)
# MAGIC
# MAGIC # Updating a DBSQL Dashboard
# MAGIC
# MAGIC * Use the sidebar navigator to find the **Dashboards**
# MAGIC * Locate the sample dashboard you just loaded; it should be called **Retail Revenue & Supply Chain** and have your username under the `Created By` field. **NOTE**: the **My Dashboards** option on the right hand side can serve as a shortcut to filter out other dashboards in the workspace
# MAGIC * Click on the dashboard name to view it
# MAGIC * View the query behind the **Shifts in Pricing Priorities** plot
# MAGIC * Hover over the plot; three vertical dots should appear. Click on these
# MAGIC * Select **View Query** from the menu that appears
# MAGIC * Review the SQL code used to populate this plot
# MAGIC * Note that 3 tier namespacing is used to identify the source table; this is a preview of new functionality to be supported by Unity Catalog
# MAGIC * Click `Run` in the top right of the screen to preview the results of the query
# MAGIC * Review the visualization
# MAGIC * Under the query, a tab named **Table** should be selected; click **Price by Priority over Time** to switch to a preview of your plot
# MAGIC * Click **Edit Visualization** at the bottom of the screen to review settings
# MAGIC * Explore how changing settings impacts your visualization
# MAGIC * If you wish to apply your changes, click **Save**; otherwise, click **Cancel**
# MAGIC * Back in the query editor, click the **Add Visualization** button to the right of the visualization name
# MAGIC * Create a bar graph
# MAGIC * Set the **X Column** as `Date`
# MAGIC * Set the **Y Column** as `Total Price`
# MAGIC * **Group by** `Priority`
# MAGIC * Set **Stacking** to `Stack`
# MAGIC * Leave all other settings as defaults
# MAGIC * Click **Save**
# MAGIC * Back in the query editor, click the default name for this visualization to edit it; change the visualization name to `Stacked Price`
# MAGIC   * At the bottom of the screen, click the three vertical dots to the left of the `Edit Visualization` button
# MAGIC * Select **Add to Dashboard** from the menu
# MAGIC * Select your `Retail Revenue & Supply Chain` dashboard
# MAGIC * Navigate back to your dashboard to view this change
# MAGIC
# MAGIC # Create a New Query
# MAGIC
# MAGIC * Use the sidebar to navigate to **Queries**
# MAGIC * Click the `Create Query` button
# MAGIC * In the **Schema Browser**, click on the current metastore and select `samples`
# MAGIC * Select the `tpch` database
# MAGIC * Click on the `partsupp` table to get a preview of the schema
# MAGIC * While hovering over the `partsupp` table name, click the `>>` button to insert the table name into your query text
# MAGIC * Write your first query:
# MAGIC * `SELECT * FROM` the `partsupp` table using the full name imported in the last step; click **Run** to preview results
# MAGIC * Modify this query to `GROUP BY ps_partkey` and return the `ps_partkey` and `sum(ps_availqty)`; click **Run** to preview results
# MAGIC     * Update your query to alias the 2nd column to be named `total_availqty` and re-execute the query (one possible finished query is sketched after this list)
# MAGIC * Save your query
# MAGIC * Click the **Save** button next to **Run** near the top right of the screen
# MAGIC * Give the query a name you'll remember
# MAGIC * Add the query to your dashboard
# MAGIC * Click the three vertical buttons at the bottom of the screen
# MAGIC * Click **Add to Dashboard**
# MAGIC * Select your `Retail Revenue & Supply Chain` dashboard
# MAGIC * Navigate back to your dashboard to view this change
# MAGIC * If you wish to change the organization of visualizations, click the three vertical buttons in the top right of the screen; click **Edit** in the menu that appears and you'll be able to drag and resize visualizations
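# MAGIC
# MAGIC For reference, one possible form of the finished query from the steps above; the three-level name assumes the `samples` catalog shown in the Schema Browser:
# MAGIC
# MAGIC ```sql
# MAGIC SELECT ps_partkey, sum(ps_availqty) AS total_availqty
# MAGIC FROM samples.tpch.partsupp
# MAGIC GROUP BY ps_partkey
# MAGIC ```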
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 69.321839 | 297 | 0.726827 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Managing Permissions for Databases, Tables, and Views
# MAGIC
# MAGIC The instructions as detailed below are provided for groups of users to explore how Table ACLs on Databricks work. It leverages Databricks SQL and the Data Explorer to accomplish these tasks, and assumes that at least one user in the group has administrator status (or that an admin has previously configured permissions allowing users to create databases, tables, and views).
# MAGIC
# MAGIC As written, these instructions are for the admin user to complete. The following notebook will have a similar exercise for users to complete in pairs.
# MAGIC
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Describe the default permissions for users and admins in DBSQL
# MAGIC * Identify the default owner for databases, tables, and views created in DBSQL and change ownership
# MAGIC * Use Data Explorer to navigate relational entities
# MAGIC * Configure permissions for tables and views with Data Explorer
# MAGIC * Configure minimal permissions to allow for table discovery and querying
# COMMAND ----------
# MAGIC %md
# MAGIC ## Generate Setup Statements
# MAGIC
# MAGIC The following cell uses Python to extract the username of the present user and format this into several statements used to create databases, tables, and views.
# MAGIC
# MAGIC Only the admin needs to execute the following cell. Successful execution will print out a series of formatted SQL queries, which can be copied into the DBSQL query editor and executed.
# COMMAND ----------
def generate_query(course, mode="reset"):
import re
username = spark.sql("SELECT current_user()").first()[0]
userhome = f"dbfs:/user/{username}/{course}"
database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", username)}_{course}"""
if mode == "reset":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
print(f"""
CREATE DATABASE IF NOT EXISTS {database}
LOCATION '{userhome}';
USE {database};
CREATE TABLE users
(id INT, name STRING, value DOUBLE, state STRING);
INSERT INTO users
VALUES (1, "Yve", 1.0, "CA"),
(2, "Omar", 2.5, "NY"),
(3, "Elia", 3.3, "OH"),
(4, "Rebecca", 4.7, "TX"),
(5, "Ameena", 5.3, "CA"),
(6, "Ling", 6.6, "NY"),
(7, "Pedro", 7.1, "KY");
CREATE VIEW ny_users_vw
AS SELECT * FROM users WHERE state = 'NY';
""")
generate_query("acls_demo")
# COMMAND ----------
# MAGIC %md
# MAGIC Steps:
# MAGIC * Run the cell above
# MAGIC * Copy the entire output to your clipboard
# MAGIC * Navigate to the Databricks SQL workspace
# MAGIC * Make sure that a DBSQL endpoint is running
# MAGIC * Use the left sidebar to select the **SQL Editor**
# MAGIC * Paste the query above and click the blue **Run** in the top right
# MAGIC
# MAGIC **NOTE**: You will need to be connected to a DBSQL endpoint to execute these queries successfully. If you cannot connect to a DBSQL endpoint, you will need to contact your administrator to give you access.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Using Data Explorer
# MAGIC
# MAGIC * Use the left sidebar navigator to select the **Data** tab; this places you in the **Data Explorer**
# MAGIC
# MAGIC ## What is the Data Explorer?
# MAGIC
# MAGIC The data explorer allows users and admins to:
# MAGIC * Navigate databases, tables, and views
# MAGIC * Explore data schema, metadata, and history
# MAGIC * Set and modify permissions of relational entities
# MAGIC
# MAGIC Note that at the moment these instructions are being written, Unity Catalog is not yet generally available. The 3 tier namespacing functionality it adds can be previewed to an extent by switching between the default `hive_metastore` and the `sample` catalog used for example dashboards and queries. Expect the Data Explorer UI and functionality to evolve as Unity Catalog is added to workspaces.
# MAGIC
# MAGIC ## Configuring Permissions
# MAGIC
# MAGIC By default, admins will have the ability to view all objects registered to the metastore and will be able to control permissions for other users in the workspace. Users will default to having **no** permissions on anything registered to the metastore, other than objects that they create in DBSQL; note that before users can create any databases, tables, or views, they must have create and usage privileges specifically granted to them.
# MAGIC
# MAGIC Generally, permissions will be set using Groups that have been configured by an administrator, often by importing organizational structures from SCIM integration with a different identity provider. This lesson will explore Access Control Lists (ACLs) used to control permissions, but will use individuals rather than groups.
# MAGIC
# MAGIC ## Table ACLs
# MAGIC
# MAGIC Databricks allows you to configure permissions for the following objects:
# MAGIC
# MAGIC | Object | Scope |
# MAGIC | --- | --- |
# MAGIC | CATALOG | controls access to the entire data catalog. |
# MAGIC | DATABASE | controls access to a database. |
# MAGIC | TABLE | controls access to a managed or external table. |
# MAGIC | VIEW | controls access to SQL views. |
# MAGIC | FUNCTION | controls access to a named function. |
# MAGIC | ANY FILE | controls access to the underlying filesystem. Users granted access to ANY FILE can bypass the restrictions put on the catalog, databases, tables, and views by reading from the file system directly. |
# MAGIC
# MAGIC **NOTE**: At present, the `ANY FILE` object cannot be set from Data Explorer.
# MAGIC
# MAGIC ## Granting Privileges
# MAGIC
# MAGIC Databricks admins and object owners can grant privileges according to the following rules:
# MAGIC
# MAGIC | Role | Can grant access privileges for |
# MAGIC | --- | --- |
# MAGIC | Databricks administrator | All objects in the catalog and the underlying filesystem. |
# MAGIC | Catalog owner | All objects in the catalog. |
# MAGIC | Database owner | All objects in the database. |
# MAGIC | Table owner | Only the table (similar options for views and functions). |
# MAGIC
# MAGIC **NOTE**: At present, Data Explorer can only be used to modify ownership of databases, tables, and views. Catalog permissions can be set interactively with the SQL Query Editor.
# MAGIC
# MAGIC ## Privileges
# MAGIC
# MAGIC The following privileges can be configured in Data Explorer:
# MAGIC
# MAGIC | Privilege | Ability |
# MAGIC | --- | --- |
# MAGIC | ALL PRIVILEGES | gives all privileges (is translated into all the below privileges). |
# MAGIC | SELECT | gives read access to an object. |
# MAGIC | MODIFY | gives ability to add, delete, and modify data to or from an object. |
# MAGIC | READ_METADATA | gives ability to view an object and its metadata. |
# MAGIC | USAGE | does not give any abilities, but is an additional requirement to perform any action on a database object. |
# MAGIC | CREATE | gives ability to create an object (for example, a table in a database). |
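# MAGIC
# MAGIC Each of the UI actions described below corresponds to a SQL statement that could instead be run from the SQL editor; as a sketch, the database-level grant performed later in this lesson would look like the following (assuming the demo database created above and the workspace's default `users` group):
# MAGIC
# MAGIC ```sql
# MAGIC GRANT USAGE, READ_METADATA ON DATABASE `dbacademy_<username>_acls_demo` TO `users`;
# MAGIC
# MAGIC SHOW GRANT ON DATABASE `dbacademy_<username>_acls_demo`;
# MAGIC ```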
# COMMAND ----------
# MAGIC %md
# MAGIC ## Review the Default Permissions
# MAGIC In the Data Explorer, find the database you created earlier (this should follow the pattern `dbacademy_<username>_acls_demo`).
# MAGIC
# MAGIC Clicking on the database name should display a list of the contained tables and views on the left hand side. On the right, you'll see some details about the database, including the **Owner** and **Location**.
# MAGIC
# MAGIC Click the **Permissions** tab to review who presently has permissions (depending on your workspace configuration, some permissions may have been inherited from settings on the catalog).
# COMMAND ----------
# MAGIC %md
# MAGIC ## Assigning Ownership
# MAGIC
# MAGIC Click the blue pencil next to the **Owner** field. Note that an owner can be set as an individual OR a group. For most implementations, having one or several small groups of trusted power users as owners will limit admin access to important datasets while ensuring that a single user does not create a choke point in productivity.
# MAGIC
# MAGIC Here, we'll set the owner to **Admins**, which is a default group containing all workspace administrators.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Change Database Permissions
# MAGIC
# MAGIC Begin by allowing all users to review metadata about the database.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Make sure you have the **Permissions** tab selected for the database
# MAGIC 1. Click the blue **Grant** button
# MAGIC 1. Select the **USAGE** and **READ_METADATA** options
# MAGIC 1. Select the **All Users** group from the drop down menu at the top
# MAGIC 1. Click **OK**
# MAGIC
# MAGIC Note that users may need to refresh their view to see these permissions updated. Updates should be reflected for users in near real time for both the Data Explorer and the SQL Editor.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Change View Permissions
# MAGIC
# MAGIC While users can now see information about this database, they won't be able to interact with the table or view declared above.
# MAGIC
# MAGIC Let's start by giving users the ability to query our view.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Select the `ny_users_vw`
# MAGIC 1. Select the **Permissions** tab
# MAGIC * Users should have inherited the permissions granted at the database level; you'll be able to see which permissions users currently have on an asset, as well as where that permission is inherited from
# MAGIC 1. Click the blue **Grant** button
# MAGIC 1. Select the **SELECT** and **READ_METADATA** options
# MAGIC * **READ_METADATA** is technically redundant, as users have already inherited this from the database. However, granting it at the view level allows us to ensure users still have this permission even if the database permissions are revoked
# MAGIC 1. Select the **All Users** group from the drop down menu at the top
# MAGIC 1. Click **OK**
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run a Query to Confirm
# MAGIC
# MAGIC In the **SQL Editor**, all users should use the **Schema Browser** on the lefthand side to navigate to the database being controlled by the admin.
# MAGIC
# MAGIC Users should start a query by typing `SELECT * FROM ` and then click the **>>** that appears while hovering over the view name to insert it into their query.
# MAGIC
# MAGIC This query should return 2 results.
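# MAGIC
# MAGIC For reference, the confirmation query should look similar to the following (substitute the admin's database name):
# MAGIC
# MAGIC ```sql
# MAGIC SELECT * FROM dbacademy_<username>_acls_demo.ny_users_vw
# MAGIC ```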
# MAGIC
# MAGIC **NOTE**: This view is defined against the `users` table, which has not had any permissions set yet. Note that users have access only to that portion of the data that passes through the filters defined on the view; this pattern demonstrates how a single underlying table can be used to drive controlled access to data for relevant stakeholders.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Change Table Permissions
# MAGIC
# MAGIC Perform the same steps as above, but now for the `users` table.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Select the `users` table
# MAGIC 1. Select the **Permissions** tab
# MAGIC 1. Click the blue **Grant** button
# MAGIC 1. Select the **SELECT** and **READ_METADATA** options
# MAGIC 1. Select the **All Users** group from the drop down menu at the top
# MAGIC 1. Click **OK**
# COMMAND ----------
# MAGIC %md
# MAGIC ## Have Users Attempt to `DROP TABLE`
# MAGIC
# MAGIC In the **SQL Editor**, encourage users to explore the data in this table.
# MAGIC
# MAGIC Encourage users to try to modify the data here; assuming permissions were set correctly, these commands should error out.
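# MAGIC
# MAGIC For example, statements like the following are expected to fail, since only **SELECT** and **READ_METADATA** were granted (the literal values are illustrative):
# MAGIC
# MAGIC ```sql
# MAGIC DROP TABLE dbacademy_<username>_acls_demo.users;
# MAGIC
# MAGIC UPDATE dbacademy_<username>_acls_demo.users
# MAGIC SET state = 'CA'
# MAGIC WHERE name = 'Omar';
# MAGIC ```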
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Database for Derivative Datasets
# MAGIC
# MAGIC In most cases users will need a location to save out derivative datasets. At present, users may not have the ability to create new tables in any location (depending on existing ACLs in the workspace and databases created during previous lessons students have completed).
# MAGIC
# MAGIC The cell below prints out the code to generate a new database and grant permissions to all users.
# MAGIC
# MAGIC **NOTE**: Here we set permissions using the SQL Editor rather than the Data Explorer. You can review the Query History to note that all of our previous permission changes from Data Explorer were executed as SQL queries and logged here (additionally, most actions in the Data Explorer are logged with the corresponding SQL query used to populate the UI fields).
# COMMAND ----------
import re
username = spark.sql("SELECT current_user()").first()[0]
database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", username)}_derivative"""
print(f"""
CREATE DATABASE {database};
GRANT USAGE, READ_METADATA, CREATE, MODIFY, SELECT ON DATABASE `{database}` TO `users`;
SHOW GRANT ON DATABASE `{database}`
""")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Have Users Create New Tables or Views
# MAGIC
# MAGIC Give users a moment to test that they can create tables and views in this new database.
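# MAGIC
# MAGIC For example, a user might run something like the following; the table name is illustrative, and the placeholders stand in for the derivative database created above and the demo database granted earlier:
# MAGIC
# MAGIC ```sql
# MAGIC CREATE TABLE <derivative_database>.ny_users_copy AS
# MAGIC SELECT * FROM <demo_database>.ny_users_vw;
# MAGIC ```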
# MAGIC
# MAGIC **NOTE**: because users were also granted **MODIFY** and **SELECT** permissions, all users will immediately be able to query and modify entities created by their peers.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Admin Configuration
# MAGIC
# MAGIC At present, users do not have any Table ACL permissions granted on the default catalog `hive_metastore` by default. The next lab assumes that users will be able to create databases.
# MAGIC
# MAGIC To enable the ability to create databases and tables in the default catalog using Databricks SQL, have a workspace admin run the following command in the DBSQL query editor:
# MAGIC
# MAGIC ```
# MAGIC GRANT usage, create ON CATALOG `hive_metastore` TO `users`
# MAGIC ```
# MAGIC
# MAGIC To confirm this has run successfully, execute the following query:
# MAGIC
# MAGIC ```
# MAGIC SHOW GRANT ON CATALOG `hive_metastore`
# MAGIC ```
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 48.063973 | 445 | 0.729874 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Configuring Privileges for Production Data and Derived Tables
# MAGIC
# MAGIC The instructions as detailed below are provided for pairs of users to explore how Table ACLs on Databricks work. It leverages Databricks SQL and the Data Explorer to accomplish these tasks, and assumes that neither user has admin privileges for the workspace. An admin will need to have previously granted `CREATE` and `USAGE` privileges on a catalog for users to be able to create databases in Databricks SQL.
# MAGIC
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Use Data Explorer to navigate relational entities
# MAGIC * Configure permissions for tables and views with Data Explorer
# MAGIC * Configure minimal permissions to allow for table discovery and querying
# MAGIC * Change ownership for databases, tables, and views created in DBSQL
# COMMAND ----------
# MAGIC %md
# MAGIC ## Exchange User Names with your Partner
# MAGIC If you are not in a workspace where your usernames correspond to your email addresses, make sure your partner has your username. They will need this when assigning privileges and searching for your database in later steps.
# MAGIC
# MAGIC The following query will print your username.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT current_user()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Generate Setup Statements
# MAGIC
# MAGIC The following cell uses Python to extract the username of the present user and format this into several statements used to create databases, tables, and views.
# MAGIC
# MAGIC Both students should execute the following cell. Successful execution will print out a series of formatted SQL queries, which can be copied into the DBSQL query editor and executed.
# COMMAND ----------
def generate_query(course, mode="reset"):
import re
import random
username = spark.sql("SELECT current_user()").first()[0]
userhome = f"dbfs:/user/{username}/{course}"
database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", username)}_{course}"""
if mode == "reset":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
print(f"""
CREATE DATABASE IF NOT EXISTS {database}
LOCATION '{userhome}';
USE {database};
CREATE TABLE beans
(name STRING, color STRING, grams FLOAT, delicious BOOLEAN);
INSERT INTO beans
VALUES ('black', 'black', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('lentils', 'brown', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('jelly', 'rainbow', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('pinto', 'brown', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('green', 'green', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('beanbag chair', 'white', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('lentils', 'green', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('kidney', 'red', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])}),
('castor', 'brown', {random.uniform(0, 5000):.2f}, {random.choice(["true", "false"])});
CREATE VIEW tasty_beans
AS SELECT * FROM beans WHERE delicious = true;
""")
generate_query("acls_lab")
# COMMAND ----------
# MAGIC %md
# MAGIC Steps:
# MAGIC * Run the cell above
# MAGIC * Copy the entire output to your clipboard
# MAGIC * Navigate to the Databricks SQL workspace
# MAGIC * Make sure that a DBSQL endpoint is running
# MAGIC * Use the left sidebar to select the **SQL Editor**
# MAGIC * Paste the query above and click the blue **Run** in the top right
# MAGIC
# MAGIC **NOTE**: You will need to be connected to a DBSQL endpoint to execute these queries successfully. If you cannot connect to a DBSQL endpoint, you will need to contact your administrator to give you access.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Find Your Database
# MAGIC In the Data Explorer, find the database you created earlier (this should follow the pattern `dbacademy_<username>_acls_lab`).
# MAGIC
# MAGIC Clicking on the database name should display a list of the contained tables and views on the left hand side. On the right, you'll see some details about the database, including the **Owner** and **Location**.
# MAGIC
# MAGIC Click the **Permissions** tab to review who presently has permissions (depending on your workspace configuration, some permissions may have been inherited from settings on the catalog).
# COMMAND ----------
# MAGIC %md
# MAGIC ## Change Database Permissions
# MAGIC
# MAGIC Steps:
# MAGIC 1. Make sure you have the **Permissions** tab selected for the database
# MAGIC 1. Click the blue **Grant** button
# MAGIC 1. Select the **USAGE**, **SELECT**, and **READ_METADATA** options
# MAGIC 1. Enter the username of your partner in the field at the top.
# MAGIC 1. Click **OK**
# MAGIC
# MAGIC Confirm with your partner that you can each see each other's databases and tables.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run a Query to Confirm
# MAGIC
# MAGIC By granting `USAGE`, `SELECT`, and `READ_METADATA` on your database, your partner should now be able to freely query the tables and views in this database, but will not be able to create new tables OR modify your data.
# MAGIC
# MAGIC In the SQL Editor, each user should run a series of queries to confirm this behavior in the database they were just added to.
# MAGIC
# MAGIC **Make sure you specify your partner's database while running the queries below.**
# MAGIC
# MAGIC Queries to execute:
# MAGIC * `SELECT * FROM <database>.beans`
# MAGIC * `SELECT * FROM <database>.tasty_beans`
# MAGIC * `SELECT * FROM <database>.beans MINUS SELECT * FROM <database>.tasty_beans`
# MAGIC * ```
# MAGIC UPDATE <database>.beans
# MAGIC SET color = 'pink'
# MAGIC WHERE name = 'black'
# MAGIC ```
# MAGIC
# MAGIC **NOTE**: These first 3 queries should succeed, but the last should fail.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Execute a Query to Generate the Union of Your Beans
# MAGIC
# MAGIC Modify the query below to specify the `beans` tables in each of your databases.
# MAGIC
# MAGIC ```
# MAGIC SELECT * FROM <database>.beans
# MAGIC UNION ALL TABLE <database>.beans
# MAGIC ```
# MAGIC
# MAGIC **NOTE**: Because random values were inserted for the `grams` and `delicious` columns, you should see 2 distinct rows for each `name`, `color` pair.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Register a Derivative View to Your Database
# MAGIC
# MAGIC Modify the query below to register the results of the previous query to your database.
# MAGIC
# MAGIC ```
# MAGIC CREATE VIEW <database>.our_beans AS
# MAGIC SELECT * FROM <database>.beans
# MAGIC UNION ALL TABLE <database>.beans
# MAGIC ```
# COMMAND ----------
# MAGIC %md
# MAGIC ## Query Your Partner's View
# MAGIC
# MAGIC Once your partner has successfully completed the previous step, run the following query against each of your tables; you should get the same results:
# MAGIC
# MAGIC ```
# MAGIC SELECT name, color, delicious, sum(grams)
# MAGIC FROM our_beans
# MAGIC GROUP BY name, color, delicious
# MAGIC ```
# COMMAND ----------
# MAGIC %md
# MAGIC ## Add Modify Permissions
# MAGIC
# MAGIC Now try to drop each other's `beans` tables. At the moment, this shouldn't work.
# MAGIC
# MAGIC Using the Data Explorer, add the `MODIFY` permission for your `beans` table for your partner.
# MAGIC
# MAGIC Again, attempt to drop your partner's `beans` table. This time, it should succeed.
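# MAGIC
# MAGIC If you prefer the SQL editor to the Data Explorer, the equivalent statements look roughly like this (the placeholders for the database and partner username are yours to fill in):
# MAGIC
# MAGIC ```sql
# MAGIC -- Run by the table owner to grant their partner MODIFY:
# MAGIC GRANT MODIFY ON TABLE <database>.beans TO `<partner_username>`;
# MAGIC
# MAGIC -- Run by the partner against the owner's database once MODIFY is granted:
# MAGIC DROP TABLE <database>.beans;
# MAGIC ```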
# MAGIC
# MAGIC Try to re-execute queries against any of the views of tables you'd previously queried in this lab.
# MAGIC
# MAGIC **NOTE**: If steps were completed successfully, none of your previous queries should work, as the data referenced by your views has been permanently deleted. This demonstrates the risks associated with providing `MODIFY` privileges to users on data that will be used in production applications and dashboards.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 42.068627 | 418 | 0.708253 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Last Mile ETL with Databricks SQL
# MAGIC
# MAGIC Before we continue, let's do a recap of some of the things we've learned so far:
# MAGIC 1. The Databricks workspace contains a suite of tools to simplify the data engineering development lifecycle
# MAGIC 1. Databricks notebooks allow users to mix SQL with other programming languages to define ETL workloads
# MAGIC 1. Delta Lake provides ACID compliant transactions and makes incremental data processing easy in the Lakehouse
# MAGIC 1. Delta Live Tables extends the SQL syntax to support many design patterns in the Lakehouse, and simplifies infrastructure deployment
# MAGIC 1. Multi-task jobs allow for full task orchestration, adding dependencies while scheduling a mix of notebooks and DLT pipelines
# MAGIC 1. Databricks SQL allows users to edit and execute SQL queries, build visualizations, and define dashboards
# MAGIC 1. Data Explorer simplifies managing Table ACLs, making Lakehouse data available to SQL analysts (soon to be expanded greatly by Unity Catalog)
# MAGIC
# MAGIC In this section, we'll focus on exploring more DBSQL functionality to support production workloads.
# MAGIC
# MAGIC We'll start by focusing on leveraging Databricks SQL to configure queries that support last mile ETL for analytics. Note that while we'll be using the Databricks SQL UI for this demo, SQL Endpoints [integrate with a number of other tools to allow external query execution](https://docs.databricks.com/integrations/partners.html), as well as having [full API support for executing arbitrary queries programmatically](https://docs.databricks.com/sql/api/index.html).
# MAGIC
# MAGIC From these query results, we'll generate a series of visualizations, which we'll combine into a dashboard.
# MAGIC
# MAGIC Finally, we'll walk through scheduling updates for queries and dashboards, and demonstrate setting alerts to help monitor the state of production datasets over time.
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, you will feel confident:
# MAGIC * Using Databricks SQL as a tool to support production ETL tasks backing analytic workloads
# MAGIC * Configuring SQL queries and visualizations with the Databricks SQL Editor
# MAGIC * Creating dashboards in Databricks SQL
# MAGIC * Scheduling updates for queries and dashboards
# MAGIC * Setting alerts for SQL queries
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run Setup Script
# MAGIC The following cell runs a notebook that defines a class we'll use to generate SQL queries.
# COMMAND ----------
# MAGIC %run ../Includes/query_generator
# COMMAND ----------
# MAGIC %md
# MAGIC Executing the following cell will reset the database and set variables used for later query formatting. You can remove the `mode="reset"` argument if you wish to print out the queries without resetting the target database.
# COMMAND ----------
Gen = QueryGenerator(course="4_4", mode="reset")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Demo Database
# MAGIC Execute the following cell and copy the results into the Databricks SQL Editor.
# MAGIC
# MAGIC These queries:
# MAGIC * Create a new database
# MAGIC * Declare two tables (we'll use these for loading data)
# MAGIC * Declare two functions (we'll use these for generating data)
# MAGIC
# MAGIC Once copied, execute the query using the **Run** button.
# COMMAND ----------
Gen.config()
# COMMAND ----------
# MAGIC %md
# MAGIC **NOTE**: The queries above are only designed to be run once after resetting the demo completely to reconfigure the environment. Users will need to have `CREATE` and `USAGE` permissions on the catalog to execute them.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Query to Load Data
# MAGIC Execute the cell below to print out a formatted SQL query for loading data in the `user_ping` table created in the previous step.
# MAGIC
# MAGIC Save this query with the name **Load Ping Data**.
# MAGIC
# MAGIC Run this query to load a batch of data.
# COMMAND ----------
Gen.load()
# COMMAND ----------
# MAGIC %md
# MAGIC Executing the query should load some data and return a preview of the data in the table.
# MAGIC
# MAGIC **NOTE**: Random numbers are being used to define and load data, so each user will have slightly different values present.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Set a Query Refresh Schedule
# MAGIC
# MAGIC Steps:
# MAGIC * Locate the **Refresh Schedule** field at the bottom right of the SQL query editor box; click the blue **Never**
# MAGIC * Use the drop down to change to Refresh every **1 minute**
# MAGIC * For **Ends**, click the **On** radio button
# MAGIC * Select tomorrow's date
# MAGIC * Click **OK**
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Query to Track Total Records
# MAGIC
# MAGIC Execute the cell below to print out a formatted SQL query to track total records in the `user_ping` table.
# MAGIC
# MAGIC Save this query with the name **User Counts**.
# MAGIC
# MAGIC Run the query to calculate the current results.
# COMMAND ----------
Gen.user_counts()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Bar Graph Visualization
# MAGIC
# MAGIC Steps:
# MAGIC * Click the **Add Visualization** button
# MAGIC * Click on the name (should default to something like `Visualization 1`) and change the name to **Total User Records**
# MAGIC * Set `user_id` for the **X Column**
# MAGIC * Set `total_records` for the **Y Columns**
# MAGIC * Click **Save**
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a New Dashboard
# MAGIC
# MAGIC Steps:
# MAGIC * Click the button with three vertical dots at the bottom of the screen and select **Add to Dashboard**.
# MAGIC * Click the **Create new dashboard** option
# MAGIC * Name your dashboard **User Ping Summary `<your_initials_here>`**
# MAGIC * Click **Save** to create the new dashboard
# MAGIC * Your newly created dashboard should now be selected as the target; click **OK** to add your visualization
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Query to Calculate the Recent Average Ping
# MAGIC
# MAGIC Execute the cell below to print out a formatted SQL query to calculate the average ping observed per user over a 3 minute window.
# MAGIC
# MAGIC Save this query with the name **Avg Ping**.
# MAGIC
# MAGIC Run the query to calculate the current results.
# COMMAND ----------
Gen.avg_ping()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Add a Line Plot Visualization to your Dashboard
# MAGIC
# MAGIC Steps:
# MAGIC * Click the **Add Visualization** button
# MAGIC * Click on the name (should default to something like `Visualization 1`) and change the name to **Avg User Ping**
# MAGIC * Select `Line` for the **Visualization Type**
# MAGIC * Set `end_time` for the **X Column**
# MAGIC * Set `avg_ping` for the **Y Columns**
# MAGIC * Set `user_id` for the **Group by**
# MAGIC * Click **Save**
# MAGIC * Click the button with three vertical dots at the bottom of the screen and select **Add to Dashboard**.
# MAGIC * Select the dashboard you created earlier
# MAGIC * Click **OK** to add your visualization
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a Query to Report Summary Statistics
# MAGIC
# MAGIC Execute the cell below to print out a formatted SQL query that summarizes all records for a user.
# MAGIC
# MAGIC Save this query with the name **Ping Summary**.
# MAGIC
# MAGIC Run the query to calculate the current results.
# COMMAND ----------
Gen.summary()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Add the Summary Table to your Dashboard
# MAGIC
# MAGIC Steps:
# MAGIC * Click the button with three vertical dots at the bottom of the screen and select **Add to Dashboard**.
# MAGIC * Select the dashboard you created earlier
# MAGIC * Click **OK** to add your visualization
# COMMAND ----------
# MAGIC %md
# MAGIC ## Review and Refresh your Dashboard
# MAGIC
# MAGIC Steps:
# MAGIC * Use the left side bar to navigate to **Dashboards**
# MAGIC * Find the dashboard you've added your queries to
# MAGIC * Click the blue **Refresh** button to update your dashboard
# MAGIC * Click the **Schedule** button to review dashboard scheduling options
# MAGIC * Note that scheduling a dashboard to update will execute all queries associated with that dashboard
# MAGIC * Do not schedule the dashboard at this time
# COMMAND ----------
# MAGIC %md
# MAGIC ## Share your Dashboard
# MAGIC
# MAGIC Steps:
# MAGIC * Click the blue **Share** button
# MAGIC * Select **All Users** from the top field
# MAGIC * Choose **Can Run** from the right field
# MAGIC * Click **Add**
# MAGIC * Change the **Credentials** to **Run as viewer**
# MAGIC
# MAGIC **NOTE**: At present, no other users should have any permissions to run your dashboard, as they have not been granted permissions to the underlying databases and tables using Table ACLs. If you wish other users to be able to trigger updates to your dashboard, you will either need to grant them permissions to **Run as owner** or add permissions for the tables referenced in your queries.
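# COMMAND ----------
# MAGIC %md
# MAGIC As a hedged illustration (not one of the required steps): assuming table access control is enabled in your workspace, granting other users access to the underlying objects could look roughly like the statements printed below, run in DBSQL by the object owner. The built-in `users` group is used here as a placeholder principal.
# COMMAND ----------
print(f"""
GRANT USAGE ON DATABASE {Gen.database} TO `users`;
GRANT SELECT ON TABLE {Gen.database}.user_ping TO `users`;
""")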
# COMMAND ----------
# MAGIC %md
# MAGIC ## Set Up an Alert
# MAGIC
# MAGIC Steps:
# MAGIC * Use the left side bar to navigate to **Alerts**
# MAGIC * Click **Create Alert** in the top right
# MAGIC * Click the field at the top left of the screen to give the alert a name **`<your_initials> Count Check`**
# MAGIC * Select your **User Counts** query
# MAGIC * For the **Trigger when** options, configure:
# MAGIC * **Value column**: `total_records`
# MAGIC * **Condition**: `>`
# MAGIC * **Threshold**: 15
# MAGIC * For **Refresh**, select **Never**
# MAGIC * Click **Create Alert**
# MAGIC * On the next screen, click the blue **Refresh** in the top right to evaluate the alert
# COMMAND ----------
# MAGIC %md
# MAGIC ## Review Alert Destination Options
# MAGIC
# MAGIC Steps:
# MAGIC * From the preview of your alert, click the blue **Add** button to the right of **Destinations** on the right side of the screen
# MAGIC * At the bottom of the window that pops up, locate and click the blue text in the message **Create new destinations in Alert Destinations**
# MAGIC * Review the available alerting options
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 39.430147 | 471 | 0.715442 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## End-to-End ETL in the Lakehouse
# MAGIC
# MAGIC In this notebook, you will pull together concepts learned throughout the course to complete an example data pipeline.
# MAGIC
# MAGIC The following is a non-exhaustive list of skills and tasks necessary to successfully complete this exercise:
# MAGIC * Using Databricks notebooks to write queries in SQL and Python
# MAGIC * Creating and modifying databases, tables, and views
# MAGIC * Using Auto Loader and Spark Structured Streaming for incremental data processing in a multi-hop architecture
# MAGIC * Using Delta Live Table SQL syntax
# MAGIC * Configuring a Delta Live Table pipeline for continuous processing
# MAGIC * Using Databricks Jobs to orchestrate tasks from notebooks stored in Repos
# MAGIC * Setting chronological scheduling for Databricks Jobs
# MAGIC * Defining queries in Databricks SQL
# MAGIC * Creating visualizations in Databricks SQL
# MAGIC * Defining Databricks SQL dashboards to review metrics and results
# COMMAND ----------
# MAGIC %md
# MAGIC ## Run Setup
# MAGIC Run the following cell to reset all the databases and directories associated with this lab.
# COMMAND ----------
# MAGIC %run "../../Includes/dlt-setup" $mode="reset"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Land Initial Data
# MAGIC Seed the landing zone with some data before proceeding.
# COMMAND ----------
File.newData()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create and Configure a DLT Pipeline
# MAGIC **NOTE**: The main difference between the instructions here and in previous labs with DLT is that in this instance, we will be setting up our pipeline for **Continuous** execution in **Production** mode.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Click the **Jobs** button on the sidebar, then select the **Delta Live Tables** tab.
# MAGIC 1. Click **Create Pipeline**.
# MAGIC 1. Fill in a **Pipeline Name** of your choosing.
# MAGIC 1. For **Notebook Libraries**, use the navigator to locate and select the notebook `1 - DLT Task`.
# MAGIC 1. Run the cell below to generate values for **source**, **Target** and **Storage Location**. (All of these will include your current username).
# MAGIC * Click `Add configuration`; enter the word `source` in the **Key** field and the output printed next to `source` below in the value field.
# MAGIC * Enter the database name printed next to `Target` below in the **Target** field.
# MAGIC * Enter the location printed next to `Storage Location` below in the **Storage Location** field.
# MAGIC 1. Set **Pipeline Mode** to **Continuous**.
# MAGIC 1. Disable autoscaling.
# MAGIC 1. Set the number of workers to 1.
# MAGIC 1. Click **Create**.
# MAGIC
# MAGIC In the UI that populates, change from **Development** to **Production** mode. This should begin the deployment of infrastructure.
# COMMAND ----------
storage_location = userhome + "/output"
print(f"source : {dataLandingLocation.split(':')[1]}")
print(f"Target: {database}")
print(f"Storage Location: {storage_location.split(':')[1]}")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Schedule a Notebook Job
# MAGIC
# MAGIC Our DLT pipeline is set up to process data as soon as it arrives. We'll schedule a notebook to land a new batch of data each minute so we can see this functionality in action.
# MAGIC
# MAGIC Steps:
# MAGIC 1. Navigate to the Jobs UI using the Databricks left side navigation bar.
# MAGIC 1. Click the blue `Create Job` button
# MAGIC 1. Configure the task:
# MAGIC 1. Enter `Land-Data` for the task name
# MAGIC 1. Select the notebook `2 - Land New Data` using the notebook picker
# MAGIC 1. Select an Existing All Purpose Cluster from the **Cluster** dropdown
# MAGIC 1. Click **Create**
# MAGIC
# MAGIC **Note**: When selecting your all purpose cluster, you will get a warning about how this will be billed as all purpose compute. Production jobs should always be scheduled against new job clusters appropriately sized for the workload, as this is billed at a much lower rate.
# MAGIC
# MAGIC ## Set a Chronological Schedule for your Job
# MAGIC Steps:
# MAGIC * On the right-hand side of the Jobs UI, locate the **Schedule** section.
# MAGIC * Click on the **Edit schedule** button to explore scheduling options.
# MAGIC * Changing the **Schedule type** field from **Manual** to **Scheduled** will bring up a cron scheduling UI (a sample cron expression is noted below).
# MAGIC * Set the schedule to update every **2 minutes**
# MAGIC * Click **Save**
# MAGIC
# MAGIC **NOTE**: If you wish, you can click **Run now** to trigger the first run, or wait until the top of the next minute to make sure your scheduling has worked successfully.
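# MAGIC
# MAGIC As a side note (an illustration, not required for this lab): the scheduling UI is backed by a Quartz cron expression, so an every-two-minutes schedule corresponds to something like `0 0/2 * * * ?`.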
# COMMAND ----------
# MAGIC %md
# MAGIC ## Register DLT Event Metrics for Querying with DBSQL
# MAGIC
# MAGIC The following cell prints out SQL statements to register the DLT event logs to your target database for querying in DBSQL.
# MAGIC
# MAGIC Execute the output code with the DBSQL Query Editor to register these tables and views. Explore each and make note of the logged event metrics.
# COMMAND ----------
print(f"""
CREATE TABLE IF NOT EXISTS {database}.dlt_events
LOCATION '{storage_location}/system/events';
CREATE VIEW IF NOT EXISTS {database}.dlt_success AS
SELECT * FROM {database}.dlt_events
WHERE details:flow_progress:metrics IS NOT NULL;
CREATE VIEW IF NOT EXISTS {database}.dlt_metrics AS
SELECT timestamp, origin.flow_name, details
FROM {database}.dlt_success
ORDER BY timestamp DESC;
""")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Define a Query on the Gold Table
# MAGIC
# MAGIC The `daily_patient_avg` table is automatically updated each time a new batch of data is processed through the DLT pipeline. Each time a query is executed against this table, DBSQL will confirm if there is a newer version and then materialize results from the newest available version.
# MAGIC
# MAGIC Run the following cell to print out a query with your database name. Save this as a DBSQL query.
# COMMAND ----------
print(f"SELECT * FROM {database}.daily_patient_avg")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Add a Line Plot Visualization
# MAGIC
# MAGIC To track trends in patient averages over time, create a line plot and add it to a new dashboard.
# MAGIC
# MAGIC Create a line plot with the following settings:
# MAGIC * **X Column**: `date`
# MAGIC * **Y Columns**: `avg_heartrate`
# MAGIC * **Group By**: `name`
# MAGIC
# MAGIC Add this visualization to a dashboard.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Track Data Processing Progress
# MAGIC
# MAGIC The code below extracts the `flow_name`, `timestamp`, and `num_output_rows` from the DLT event logs.
# MAGIC
# MAGIC Save this query in DBSQL, then define a bar plot visualization that shows:
# MAGIC * **X Column**: `timestamp`
# MAGIC * **Y Columns**: `num_output_rows`
# MAGIC * **Group By**: `flow_name`
# MAGIC
# MAGIC Add your visualization to your dashboard.
# COMMAND ----------
print(f"""
SELECT flow_name, timestamp, int(details:flow_progress:metrics:num_output_rows) num_output_rows
FROM {database}.dlt_metrics
ORDER BY timestamp DESC
""")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Refresh your Dashboard and Track Results
# MAGIC
# MAGIC The `Land-Data` notebook scheduled with Jobs above has 12 batches of data, each representing a month of recordings for our small sampling of patients. As configured per our instructions, it should take just over 20 minutes for all of these batches of data to be triggered and processed (we scheduled the Databricks Job to run every 2 minutes, and batches of data will process through our pipeline very quickly after initial ingestion).
# MAGIC
# MAGIC Refresh your dashboard and review your visualizations to see how many batches of data have been processed. (If you followed the instructions as outlined here, there should be 12 distinct flow updates tracked by your DLT metrics.) If all source data has not yet been processed, you can go back to the Databricks Jobs UI and manually trigger additional batches.
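# COMMAND ----------
# MAGIC %md
# MAGIC If you prefer a programmatic check, the optional cell below counts the files that have landed so far. This is a sketch that assumes the `dataLandingLocation` variable from the setup notebook is still defined in this session.
# COMMAND ----------
# Optional: count the batches copied into the landing zone so far (12 are expected in total)
landed_files = dbutils.fs.ls(dataLandingLocation)
print(f"{len(landed_files)} of 12 batches landed")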
# COMMAND ----------
# MAGIC %md
# MAGIC ## Execute a Query to Repair Broken Data
# MAGIC
# MAGIC Review the code that defined the `recordings_enriched` table to identify the filter applied for the quality check.
# MAGIC
# MAGIC In the cell below, write a query that returns all the records from the `recordings_bronze` table that were refused by this quality check.
# COMMAND ----------
# ANSWER
display(spark.sql(f"SELECT * FROM {database}.recordings_bronze WHERE heartrate <= 0"))
# COMMAND ----------
# MAGIC %md
# MAGIC For the purposes of our demo, let's assume that thorough manual review of our data and systems has demonstrated that occasionally otherwise valid heartrate recordings are returned as negative values.
# MAGIC
# MAGIC Run the following query to examine these same rows with the negative sign removed.
# COMMAND ----------
display(spark.sql(f"SELECT abs(heartrate), * FROM {database}.recordings_bronze WHERE heartrate <= 0"))
# COMMAND ----------
# MAGIC %md
# MAGIC To complete our dataset, we wish to insert these fixed records into the silver `recordings_enriched` table.
# MAGIC
# MAGIC Use the cell below to update the query used in the DLT pipeline to execute this repair.
# MAGIC
# MAGIC **NOTE**: Make sure you update the code to only process those records that were previously rejected due to the quality check.
# COMMAND ----------
# ANSWER
spark.sql(f"""
MERGE INTO {database}.recordings_enriched t
USING (SELECT
CAST(a.device_id AS INTEGER) device_id,
CAST(a.mrn AS LONG) mrn,
abs(CAST(a.heartrate AS DOUBLE)) heartrate,
CAST(from_unixtime(a.time, 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP) time,
b.name
FROM {database}.recordings_bronze a
INNER JOIN {database}.pii b
ON a.mrn = b.mrn
WHERE heartrate <= 0) v
ON t.mrn=v.mrn AND t.time=v.time
WHEN NOT MATCHED THEN INSERT *
""")
# COMMAND ----------
# MAGIC %md
# MAGIC Use the cell below to manually or programmatically confirm that this update has been successful.
# MAGIC
# MAGIC (The total number of records in the `recordings_bronze` should now be equal to the total records in `recordings_enriched`).
# COMMAND ----------
# ANSWER
assert spark.table(f"{database}.recordings_bronze").count() == spark.table(f"{database}.recordings_enriched").count()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Consider Production Data Permissions
# MAGIC
# MAGIC Note that while our manual repair of the data was successful, as the owner of these datasets we have permissions by default to modify or delete this data from any location where we're executing code.
# MAGIC
# MAGIC To put this another way: our current permissions would allow us to change or drop our production tables permanently if an errant SQL query is accidentally executed with the current user's permissions (or if other users are granted similar permissions).
# MAGIC
# MAGIC While for the purposes of this lab, we desired to have full permissions on our data, as we move code from development to production, it is safer to leverage [service principals](https://docs.databricks.com/administration-guide/users-groups/service-principals.html) when scheduling Jobs and DLT Pipelines to avoid accidental data modifications.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Shut Down Production Infrastructure
# MAGIC
# MAGIC Note that Databricks Jobs, DLT Pipelines, and scheduled DBSQL queries and dashboards are all designed to provide sustained execution of production code. In this end-to-end demo, you were instructed to configure a Job and Pipeline for continuous data processing. To prevent these workloads from continuing to execute, you should **Pause** your Databricks Job and **Stop** your DLT pipeline. Deleting these assets will also ensure that production infrastructure is terminated.
# MAGIC
# MAGIC **NOTE**: All instructions for DBSQL asset scheduling in previous lessons instructed users to set the update schedule to end tomorrow. You may choose to go back and also cancel these updates to prevent DBSQL endpoints from staying on until that time.
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 44.786477 | 482 | 0.730198 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ../../Includes/dlt-setup
# COMMAND ----------
File.newData()
| 11 | 37 | 0.616822 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
import pyspark.sql.functions as F
import re
course_name = "eltsql"
username = spark.sql("SELECT current_user()").first()[0]
clean_username = re.sub("[^a-zA-Z0-9]", "_", username)
database = f"""{clean_username}_dbacademy_{course_name}"""
userhome = f"dbfs:/user/{username}/dbacademy/{course_name}"
print(f"username: {username}")
print(f"clean_username: {clean_username}")
print(f"database: {database}")
print(f"userhome: {userhome}")
dbutils.fs.rm(userhome, True)
print(f"Dropping the database {database}")
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
| 27.090909 | 59 | 0.696921 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
for stream in spark.streams.active:
stream.stop()
# COMMAND ----------
import pyspark.sql.functions as F
import re
course_name = "dewd"
username = spark.sql("SELECT current_user()").first()[0]
clean_username = re.sub("[^a-zA-Z0-9]", "_", username)
database = f"dbacademy_{clean_username}_{course_name}"
userhome = f"dbfs:/user/{username}/dbacademy/{course_name}"
print(f"""
username: {username}
userhome: {userhome}
database: {database}""")
dbutils.widgets.text("mode", "setup")
mode = dbutils.widgets.get("mode")
if mode == "reset" or mode == "clean":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
if mode != "clean":
spark.sql(f"CREATE DATABASE IF NOT EXISTS {database}")
spark.sql(f"USE {database}")
# COMMAND ----------
# MAGIC %run ./mount-datasets
# COMMAND ----------
outputPath = userhome + "/streaming-concepts"
checkpointPath = outputPath + "/checkpoint"
# original dataset
dataSource = "/mnt/training/definitive-guide/data/activity-json/streaming"
# data landing location; files will be copied from the original dataset one at a time for the incremental ingestion use case
dataLandingLocation = outputPath + "/landing-zone"
outputTable = "bronze_table"
spark.conf.set('c.outputTable', outputTable)
# COMMAND ----------
class FileArrival:
def __init__(self, dataSource, landingZone):
self.sourceFiles = dbutils.fs.ls(dataSource)
dbutils.fs.mkdirs(landingZone)
self.landingZone = landingZone
self.fileID = 0
def newData(self, numFiles=1):
for i in range(numFiles):
dbutils.fs.cp(self.sourceFiles[self.fileID].path, self.landingZone)
self.fileID+=1
# COMMAND ----------
File = FileArrival(dataSource, dataLandingLocation)
File.newData()
| 24.375 | 116 | 0.67908 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
for stream in spark.streams.active:
stream.stop()
# COMMAND ----------
import pyspark.sql.functions as F
import re
dbutils.widgets.text("course", "dewd")
course_name = dbutils.widgets.get("course")
username = spark.sql("SELECT current_user()").first()[0]
clean_username = re.sub("[^a-zA-Z0-9]", "_", username)
database = f"dbacademy_{clean_username}_{course_name}"
userhome = f"dbfs:/user/{username}/dbacademy/{course_name}"
print(f"""
username: {username}
userhome: {userhome}
database: {database}""")
dbutils.widgets.text("mode", "setup")
mode = dbutils.widgets.get("mode")
if mode == "reset" or mode == "clean":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
# COMMAND ----------
# MAGIC %run ./mount-datasets
# COMMAND ----------
sqlContext.setConf("spark.sql.shuffle.partitions", spark.sparkContext.defaultParallelism)
# COMMAND ----------
dataSource = "/mnt/training/healthcare"
dataLandingLocation = userhome + "/source"
bronzePath = userhome + "/bronze"
recordingsParsedPath = userhome + "/silver/recordings_parsed"
recordingsEnrichedPath = userhome + "/silver/recordings_enriched"
dailyAvgPath = userhome + "/gold/dailyAvg"
checkpointPath = userhome + "/checkpoints"
bronzeCheckpoint = userhome + "/checkpoints/bronze"
recordingsParsedCheckpoint = userhome + "/checkpoints/recordings_parsed"
recordingsEnrichedCheckpoint = userhome + "/checkpoints/recordings_enriched"
dailyAvgCheckpoint = userhome + "/checkpoints/dailyAvgPath"
# COMMAND ----------
class FileArrival:
def __init__(self):
self.source = dataSource + "/tracker/streaming/"
self.userdir = dataLandingLocation + "/"
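        # If files already exist in the landing zone, resume from the month after the latest one;
        # otherwise (e.g. the directory does not exist yet) start from month 1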
try:
self.curr_mo = 1 + int(max([x[1].split(".")[0] for x in dbutils.fs.ls(self.userdir)]))
except:
self.curr_mo = 1
def newData(self, continuous=False):
if self.curr_mo > 12:
print("Data source exhausted\n")
elif continuous == True:
while self.curr_mo <= 12:
curr_file = f"{self.curr_mo:02}.json"
dbutils.fs.cp(self.source + curr_file, self.userdir + curr_file)
self.curr_mo += 1
else:
curr_file = f"{str(self.curr_mo).zfill(2)}.json"
dbutils.fs.cp(self.source + curr_file, self.userdir + curr_file)
self.curr_mo += 1
File = FileArrival()
| 29.814815 | 98 | 0.633267 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %scala
# MAGIC def cloudAndRegion = {
# MAGIC import com.databricks.backend.common.util.Project
# MAGIC import com.databricks.conf.trusted.ProjectConf
# MAGIC import com.databricks.backend.daemon.driver.DriverConf
# MAGIC val conf = new DriverConf(ProjectConf.loadLocalConfig(Project.Driver))
# MAGIC (conf.cloudProvider.getOrElse("Unknown"), conf.region)
# MAGIC }
# MAGIC
# MAGIC // These keys are read-only so they're okay to have here
# MAGIC val awsAccessKey = "AKIAJBRYNXGHORDHZB4A"
# MAGIC val awsSecretKey = "a0BzE1bSegfydr3%2FGE3LSPM6uIV5A4hOUfpH8aFF"
# MAGIC val awsAuth = s"${awsAccessKey}:${awsSecretKey}"
# MAGIC
# MAGIC def getAwsMapping(region:String):(String,Map[String,String]) = {
# MAGIC
# MAGIC val MAPPINGS = Map(
# MAGIC "ap-northeast-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-ap-northeast-1/common", Map[String,String]()),
# MAGIC "ap-northeast-2" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-ap-northeast-2/common", Map[String,String]()),
# MAGIC "ap-south-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-ap-south-1/common", Map[String,String]()),
# MAGIC "ap-southeast-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-ap-southeast-1/common", Map[String,String]()),
# MAGIC "ap-southeast-2" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-ap-southeast-2/common", Map[String,String]()),
# MAGIC "ca-central-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-ca-central-1/common", Map[String,String]()),
# MAGIC "eu-central-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-eu-central-1/common", Map[String,String]()),
# MAGIC "eu-west-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-eu-west-1/common", Map[String,String]()),
# MAGIC "eu-west-2" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-eu-west-2/common", Map[String,String]()),
# MAGIC "eu-west-3" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-eu-central-1/common", Map[String,String]()),
# MAGIC
# MAGIC "sa-east-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-sa-east-1/common", Map[String,String]()),
# MAGIC "us-east-1" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-us-east-1/common", Map[String,String]()),
# MAGIC "us-east-2" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training-us-east-2/common", Map[String,String]()),
# MAGIC "us-west-2" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training/common", Map[String,String]()),
# MAGIC "_default" -> (s"s3a://${awsAccessKey}:${awsSecretKey}@databricks-corp-training/common", Map[String,String]())
# MAGIC )
# MAGIC
# MAGIC MAPPINGS.getOrElse(region, MAPPINGS("_default"))
# MAGIC }
# MAGIC
# MAGIC def getAzureMapping(region:String):(String,Map[String,String]) = {
# MAGIC
# MAGIC var MAPPINGS = Map(
# MAGIC "australiacentral" -> ("dbtrainaustraliasoutheas",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=br8%2B5q2ZI9osspeuPtd3haaXngnuWPnZaHKFoLmr370%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "australiacentral2" -> ("dbtrainaustraliasoutheas",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=br8%2B5q2ZI9osspeuPtd3haaXngnuWPnZaHKFoLmr370%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "australiaeast" -> ("dbtrainaustraliaeast",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=FM6dy59nmw3f4cfN%2BvB1cJXVIVz5069zHmrda5gZGtU%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "australiasoutheast" -> ("dbtrainaustraliasoutheas",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=br8%2B5q2ZI9osspeuPtd3haaXngnuWPnZaHKFoLmr370%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "canadacentral" -> ("dbtraincanadacentral",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=dwAT0CusWjvkzcKIukVnmFPTmi4JKlHuGh9GEx3OmXI%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "canadaeast" -> ("dbtraincanadaeast",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=SYmfKBkbjX7uNDnbSNZzxeoj%2B47PPa8rnxIuPjxbmgk%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "centralindia" -> ("dbtraincentralindia",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=afrYm3P5%2BB4gMg%2BKeNZf9uvUQ8Apc3T%2Bi91fo/WOZ7E%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "centralus" -> ("dbtraincentralus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=As9fvIlVMohuIV8BjlBVAKPv3C/xzMRYR1JAOB%2Bbq%2BQ%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "eastasia" -> ("dbtraineastasia",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=sK7g5pki8bE88gEEsrh02VGnm9UDlm55zTfjZ5YXVMc%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "eastus" -> ("dbtraineastus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=tlw5PMp1DMeyyBGTgZwTbA0IJjEm83TcCAu08jCnZUo%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "eastus2" -> ("dbtraineastus2",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=Y6nGRjkVj6DnX5xWfevI6%2BUtt9dH/tKPNYxk3CNCb5A%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "japaneast" -> ("dbtrainjapaneast",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=q6r9MS/PC9KLZ3SMFVYO94%2BfM5lDbAyVsIsbBKEnW6Y%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "japanwest" -> ("dbtrainjapanwest",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=M7ic7/jOsg/oiaXfo8301Q3pt9OyTMYLO8wZ4q8bko8%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "northcentralus" -> ("dbtrainnorthcentralus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=GTLU0g3pajgz4dpGUhOpJHBk3CcbCMkKT8wxlhLDFf8%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "northcentralus" -> ("dbtrainnorthcentralus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=GTLU0g3pajgz4dpGUhOpJHBk3CcbCMkKT8wxlhLDFf8%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "northeurope" -> ("dbtrainnortheurope",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=35yfsQBGeddr%2BcruYlQfSasXdGqJT3KrjiirN/a3dM8%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "southcentralus" -> ("dbtrainsouthcentralus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=3cnVg/lzWMx5XGz%2BU4wwUqYHU5abJdmfMdWUh874Grc%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "southcentralus" -> ("dbtrainsouthcentralus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=3cnVg/lzWMx5XGz%2BU4wwUqYHU5abJdmfMdWUh874Grc%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "southindia" -> ("dbtrainsouthindia",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=0X0Ha9nFBq8qkXEO0%2BXd%2B2IwPpCGZrS97U4NrYctEC4%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "southeastasia" -> ("dbtrainsoutheastasia",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=H7Dxi1yqU776htlJHbXd9pdnI35NrFFsPVA50yRC9U0%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "uksouth" -> ("dbtrainuksouth",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=SPAI6IZXmm%2By/WMSiiFVxp1nJWzKjbBxNc5JHUz1d1g%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "ukwest" -> ("dbtrainukwest",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=olF4rjQ7V41NqWRoK36jZUqzDBz3EsyC6Zgw0QWo0A8%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "westcentralus" -> ("dbtrainwestcentralus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=UP0uTNZKMCG17IJgJURmL9Fttj2ujegj%2BrFN%2B0OszUE%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "westeurope" -> ("dbtrainwesteurope",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=csG7jGsNFTwCArDlsaEcU4ZUJFNLgr//VZl%2BhdSgEuU%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "westindia" -> ("dbtrainwestindia",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=fI6PNZ7YvDGKjArs1Et2rAM2zgg6r/bsKEjnzQxgGfA%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "westus" -> ("dbtrainwestus",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=%2B1XZDXbZqnL8tOVsmRtWTH/vbDAKzih5ThvFSZMa3Tc%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "westus2" -> ("dbtrainwestus2",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=DD%2BO%2BeIZ35MO8fnh/fk4aqwbne3MAJ9xh9aCIU/HiD4%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z"),
# MAGIC "_default" -> ("dbtrainwestus2",
# MAGIC "?ss=b&sp=rl&sv=2018-03-28&st=2018-04-01T00%3A00%3A00Z&sig=DD%2BO%2BeIZ35MO8fnh/fk4aqwbne3MAJ9xh9aCIU/HiD4%3D&srt=sco&se=2023-04-01T00%3A00%3A00Z")
# MAGIC )
# MAGIC
# MAGIC val (account: String, sasKey: String) = MAPPINGS.getOrElse(region, MAPPINGS("_default"))
# MAGIC
# MAGIC val blob = "training"
# MAGIC val source = s"wasbs://$blob@$account.blob.core.windows.net/"
# MAGIC val configMap = Map(
# MAGIC s"fs.azure.sas.$blob.$account.blob.core.windows.net" -> sasKey
# MAGIC )
# MAGIC
# MAGIC (source, configMap)
# MAGIC }
# MAGIC
# MAGIC def retryMount(source: String, mountPoint: String): Unit = {
# MAGIC try {
# MAGIC // Mount with IAM roles instead of keys for PVC
# MAGIC dbutils.fs.mount(source, mountPoint)
# MAGIC dbutils.fs.ls(mountPoint) // Test read to confirm successful mount.
# MAGIC } catch {
# MAGIC case e: Exception => throw new RuntimeException(s"*** ERROR: Unable to mount $mountPoint: ${e.getMessage}", e)
# MAGIC }
# MAGIC }
# MAGIC
# MAGIC def mount(source: String, extraConfigs:Map[String,String], mountPoint: String): Unit = {
# MAGIC try {
# MAGIC dbutils.fs.mount(source, mountPoint, extraConfigs=extraConfigs)
# MAGIC dbutils.fs.ls(mountPoint) // Test read to confirm successful mount.
# MAGIC } catch {
# MAGIC case ioe: java.lang.IllegalArgumentException => retryMount(source, mountPoint)
# MAGIC case e: Exception => throw new RuntimeException(s"*** ERROR: Unable to mount $mountPoint: ${e.getMessage}", e)
# MAGIC }
# MAGIC }
# MAGIC
# MAGIC def autoMount(fix:Boolean = false, failFast:Boolean = false, mountPoint:String = "/mnt/training"): Unit = {
# MAGIC val (cloud, region) = cloudAndRegion
# MAGIC spark.conf.set("com.databricks.training.cloud.name", cloud)
# MAGIC spark.conf.set("com.databricks.training.region.name", region)
# MAGIC if (cloud=="AWS") {
# MAGIC val (source, extraConfigs) = getAwsMapping(region)
# MAGIC val resultMsg = mountSource(fix, failFast, mountPoint, source, extraConfigs)
# MAGIC displayHTML(s"Mounting course-specific datasets to <b>$mountPoint</b>...<br/>"+resultMsg)
# MAGIC } else if (cloud=="Azure") {
# MAGIC val (source, extraConfigs) = initAzureDataSource(region)
# MAGIC val resultMsg = mountSource(fix, failFast, mountPoint, source, extraConfigs)
# MAGIC displayHTML(s"Mounting course-specific datasets to <b>$mountPoint</b>...<br/>"+resultMsg)
# MAGIC } else {
# MAGIC val (source, extraConfigs) = ("s3a://databricks-corp-training/common", Map[String,String]())
# MAGIC val resultMsg = mountSource(fix, failFast, mountPoint, source, extraConfigs)
# MAGIC displayHTML(s"Mounted course-specific datasets to <b>$mountPoint</b>.")
# MAGIC }
# MAGIC }
# MAGIC
# MAGIC def initAzureDataSource(azureRegion:String):(String,Map[String,String]) = {
# MAGIC val mapping = getAzureMapping(azureRegion)
# MAGIC val (source, config) = mapping
# MAGIC val (sasEntity, sasToken) = config.head
# MAGIC
# MAGIC val datasource = "%s\t%s\t%s".format(source, sasEntity, sasToken)
# MAGIC spark.conf.set("com.databricks.training.azure.datasource", datasource)
# MAGIC
# MAGIC return mapping
# MAGIC }
# MAGIC
# MAGIC def mountSource(fix:Boolean, failFast:Boolean, mountPoint:String, source:String, extraConfigs:Map[String,String]): String = {
# MAGIC val mntSource = source.replace(awsAuth+"@", "")
# MAGIC
# MAGIC if (dbutils.fs.mounts().map(_.mountPoint).contains(mountPoint)) {
# MAGIC val mount = dbutils.fs.mounts().filter(_.mountPoint == mountPoint).head
# MAGIC if (mount.source == mntSource) {
# MAGIC return s"""Datasets are already mounted to <b>$mountPoint</b>."""
# MAGIC
# MAGIC } else if (failFast) {
# MAGIC throw new IllegalStateException(s"Expected $mntSource but found ${mount.source}")
# MAGIC
# MAGIC } else if (fix) {
# MAGIC println(s"Unmounting existing datasets ($mountPoint from ${mount.source}).")
# MAGIC dbutils.fs.unmount(mountPoint)
# MAGIC mountSource(fix, failFast, mountPoint, source, extraConfigs)
# MAGIC } else {
# MAGIC return s"""<b style="color:red">Invalid Mounts!</b></br>
# MAGIC <ul>
# MAGIC <li>The training datasets you are using are from an unexpected source</li>
# MAGIC <li>Expected <b>$mntSource</b> but found <b>${mount.source}</b></li>
# MAGIC <li>Failure to address this issue may result in significant performance degradation. To address this issue:</li>
# MAGIC <ol>
# MAGIC <li>Insert a new cell after this one</li>
# MAGIC <li>In that new cell, run the command <code style="color:blue; font-weight:bold">%scala fixMounts()</code></li>
# MAGIC <li>Verify that the problem has been resolved.</li>
# MAGIC </ol>"""
# MAGIC }
# MAGIC } else {
# MAGIC println(s"""Mounting datasets to $mountPoint.""")
# MAGIC mount(source, extraConfigs, mountPoint)
# MAGIC return s"""Mounted datasets to <b>$mountPoint</b> from <b>$mntSource<b>."""
# MAGIC }
# MAGIC }
# MAGIC
# MAGIC def fixMounts(): Unit = {
# MAGIC autoMount(true)
# MAGIC }
# MAGIC
# MAGIC autoMount(true)
| 74.298507 | 188 | 0.650192 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
for stream in spark.streams.active:
stream.stop()
# COMMAND ----------
import pyspark.sql.functions as F
import re
course_name = "dewd"
username = spark.sql("SELECT current_user()").first()[0]
clean_username = re.sub("[^a-zA-Z0-9]", "_", username)
database = f"dbacademy_{clean_username}_{course_name}"
userhome = f"dbfs:/user/{username}/dbacademy/{course_name}"
print(f"""
username: {username}
userhome: {userhome}
database: {database}""")
dbutils.widgets.text("mode", "setup")
mode = dbutils.widgets.get("mode")
if mode == "reset" or mode == "clean":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
if mode != "clean":
spark.sql(f"CREATE DATABASE IF NOT EXISTS {database}")
spark.sql(f"USE {database}")
# COMMAND ----------
# MAGIC %run ./mount-datasets
# COMMAND ----------
sqlContext.setConf("spark.sql.shuffle.partitions", spark.sparkContext.defaultParallelism)
# COMMAND ----------
dataSource = "/mnt/training/healthcare"
dataLandingLocation = userhome + "/source"
bronzePath = userhome + "/bronze"
recordingsParsedPath = userhome + "/silver/recordings_parsed"
recordingsEnrichedPath = userhome + "/silver/recordings_enriched"
dailyAvgPath = userhome + "/gold/dailyAvg"
checkpointPath = userhome + "/checkpoints"
bronzeCheckpoint = userhome + "/checkpoints/bronze"
recordingsParsedCheckpoint = userhome + "/checkpoints/recordings_parsed"
recordingsEnrichedCheckpoint = userhome + "/checkpoints/recordings_enriched"
dailyAvgCheckpoint = userhome + "/checkpoints/dailyAvgPath"
# COMMAND ----------
class FileArrival:
def __init__(self):
self.source = dataSource + "/tracker/streaming/"
self.userdir = dataLandingLocation + "/"
self.curr_mo = 1
def newData(self, continuous=False):
if self.curr_mo > 12:
print("Data source exhausted\n")
elif continuous == True:
while self.curr_mo <= 12:
curr_file = f"{self.curr_mo:02}.json"
dbutils.fs.cp(self.source + curr_file, self.userdir + curr_file)
self.curr_mo += 1
else:
curr_file = f"{str(self.curr_mo).zfill(2)}.json"
dbutils.fs.cp(self.source + curr_file, self.userdir + curr_file)
self.curr_mo += 1
File = FileArrival()
| 27.876543 | 89 | 0.660393 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
class QueryGenerator:
def __init__(self, course, mode="normal"):
import re
import random
self.username = spark.sql("SELECT current_user()").first()[0]
self.userhome = f"dbfs:/user/{self.username}/{course}"
self.database = f"""dbacademy_{re.sub("[^a-zA-Z0-9]", "_", self.username)}_{course}"""
if mode == "reset":
spark.sql(f"DROP DATABASE IF EXISTS {self.database} CASCADE")
dbutils.fs.rm(self.userhome, True)
def config(self):
print(f"""
CREATE DATABASE {self.database}
LOCATION '{self.userhome}';
USE {self.database};
CREATE TABLE user_ping
(user_id STRING, ping INTEGER, time TIMESTAMP);
CREATE TABLE user_ids (user_id STRING);
INSERT INTO user_ids VALUES
("potato_luver"),
("beanbag_lyfe"),
("default_username"),
("the_king"),
("n00b"),
("frodo"),
("data_the_kid"),
("el_matador"),
("the_wiz");
CREATE FUNCTION get_ping()
RETURNS INT
RETURN int(rand() * 250);
CREATE FUNCTION is_active()
RETURNS BOOLEAN
RETURN CASE
WHEN rand() > .25 THEN true
ELSE false
END;
""")
def load(self):
print(f"""
INSERT INTO {self.database}.user_ping
SELECT *,
{self.database}.get_ping() ping,
current_timestamp() time
FROM {self.database}.user_ids
WHERE {self.database}.is_active()=true;
SELECT * FROM {self.database}.user_ping;
""")
def user_counts(self):
print(f"""
SELECT user_id, count(*) total_records
FROM {self.database}.user_ping
GROUP BY user_id
ORDER BY
total_records DESC,
user_id ASC;
""")
def avg_ping(self):
print(f"""
SELECT user_id, window.end end_time, mean(ping) avg_ping
FROM {self.database}.user_ping
GROUP BY user_id, window(time, '3 minutes')
ORDER BY
end_time DESC,
user_id ASC;
""")
def summary(self):
print(f"""
SELECT user_id, min(time) first_seen, max(time) last_seen, count(*) total_records, avg(ping) total_avg_ping
FROM {self.database}.user_ping
GROUP BY user_id
ORDER BY user_id ASC
""")
| 22.752809 | 107 | 0.612399 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ./setup-updates
# COMMAND ----------
def merge_deduped_users():
spark.sql(f"""
CREATE OR REPLACE TEMP VIEW deduped_users AS
SELECT user_id, user_first_touch_timestamp, max(email) email, max(updated) updated
FROM users_update
GROUP BY user_id, user_first_touch_timestamp
""")
spark.sql(f"""
MERGE INTO users a
USING deduped_users b
ON a.user_id = b.user_id
WHEN MATCHED AND a.email IS NULL AND b.email IS NOT NULL THEN
UPDATE SET email = b.email, updated = b.updated
WHEN NOT MATCHED THEN INSERT *
""")
# COMMAND ----------
def merge_events_update():
spark.sql(f"""
MERGE INTO events a
USING events_update b
ON a.user_id = b.user_id AND a.event_timestamp = b.event_timestamp
WHEN NOT MATCHED AND b.traffic_source = 'email' THEN
INSERT *
""")
# COMMAND ----------
def merge_sales_update():
spark.sql(f"""
COPY INTO sales
FROM "{Paths.source}/sales/sales-30m.parquet"
FILEFORMAT = PARQUET
""")
# COMMAND ----------
merge_deduped_users()
merge_events_update()
merge_sales_update()
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TABLE item_lookup AS
# MAGIC SELECT * FROM parquet.`${c.source}/products/products.parquet`
| 21 | 84 | 0.657622 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
import sys, subprocess, os
subprocess.check_call([sys.executable, "-m", "pip", "install", "git+https://github.com/databricks-academy/user-setup"])
from dbacademy import LessonConfig
LessonConfig.configure(course_name="Databases Tables and Views on Databricks", use_db=False)
LessonConfig.install_datasets(silent=True)
| 37.888889 | 119 | 0.787966 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ./setup
# COMMAND ----------
dbutils.fs.rm(f"{Paths.source}/sales/sales.csv", True)
dbutils.fs.cp(f"{Paths.source_uri}/sales/sales.csv", f"{Paths.source}/sales/sales.csv", True)
(spark
.read
.format("parquet")
.load(f"{Paths.source}/users/users.parquet")
.repartition(1)
.write
.format("org.apache.spark.sql.jdbc")
.option("url", f"jdbc:sqlite:/{username}_ecommerce.db")
.option("dbtable", "users")
.mode("overwrite")
.save())
| 22.545455 | 96 | 0.628627 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ./setup
# COMMAND ----------
def load_historical():
spark.sql(f"""
CREATE OR REPLACE TABLE events AS
SELECT * FROM parquet.`{Paths.source}/events/events.parquet`
""")
spark.sql(f"""
CREATE OR REPLACE TABLE users AS
SELECT *, current_timestamp() updated FROM parquet.`{Paths.source}/users/users.parquet`
""")
spark.sql(f"""
CREATE OR REPLACE TABLE sales AS
SELECT * FROM parquet.`{Paths.source}/sales/sales.parquet`
""")
# COMMAND ----------
load_historical()
| 19.037037 | 89 | 0.65 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ./sql-setup $course="meta" $mode="cleanup"
# COMMAND ----------
URI = "wasbs://[email protected]/databases_tables_and_views_on_databricks/v02"
# COMMAND ----------
dbutils.fs.cp(URI, f"{userhome}/datasets", True)
| 22.833333 | 103 | 0.677193 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ./setup-cleaned
# COMMAND ----------
def create_transactions():
spark.sql(f"""
CREATE OR REPLACE TABLE transactions AS
SELECT * FROM (
SELECT
user_id,
order_id,
transaction_timestamp,
total_item_quantity,
purchase_revenue_in_usd,
unique_items,
a.items_exploded.item_id item_id,
a.items_exploded.quantity quantity
FROM
( SELECT *, explode(items) items_exploded FROM sales ) a
INNER JOIN users b
ON a.email = b.email
) PIVOT (
sum(quantity) FOR item_id in (
'P_FOAM_K',
'M_STAN_Q',
'P_FOAM_S',
'M_PREM_Q',
'M_STAN_F',
'M_STAN_T',
'M_PREM_K',
'M_PREM_F',
'M_STAN_K',
'M_PREM_T',
'P_DOWN_S',
'P_DOWN_K'
)
)
""")
create_transactions()
| 19.636364 | 64 | 0.506064 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %run ./setup-load
# COMMAND ----------
def load_events_raw():
spark.sql(f"""
CREATE TABLE IF NOT EXISTS events_json
(key BINARY, offset INT, partition BIGINT, timestamp BIGINT, topic STRING, value BINARY)
USING JSON OPTIONS (path = "{Paths.source}/events/events-kafka.json");
""")
spark.sql(f"""
CREATE OR REPLACE TABLE events_raw
(key BINARY, offset BIGINT, partition BIGINT, timestamp BIGINT, topic STRING, value BINARY);
""")
spark.sql(f"""
INSERT INTO events_raw
SELECT * FROM events_json
""")
# COMMAND ----------
# lesson: nested data & advanced transformations
# Last Lab & Writing to Delta
def create_events_update():
spark.sql(f"""
CREATE OR REPLACE TEMP VIEW events_raw_json AS
SELECT from_json(cast(value as STRING), ("device STRING, ecommerce STRUCT< purchase_revenue_in_usd: DOUBLE, total_item_quantity: BIGINT, unique_items: BIGINT>, event_name STRING, event_previous_timestamp BIGINT, event_timestamp BIGINT, geo STRUCT< city: STRING, state: STRING>, items ARRAY< STRUCT< coupon: STRING, item_id: STRING, item_name: STRING, item_revenue_in_usd: DOUBLE, price_in_usd: DOUBLE, quantity: BIGINT>>, traffic_source STRING, user_first_touch_timestamp BIGINT, user_id STRING")) json
FROM events_raw
""")
spark.sql(f"""
CREATE OR REPLACE TEMP VIEW events_update AS
WITH deduped_events_raw AS (
SELECT max(json) json FROM events_raw_json
GROUP BY json.user_id, json.event_timestamp
)
SELECT json.* FROM deduped_events_raw
""")
# COMMAND ----------
# lesson: Writing delta
def create_users_update():
spark.sql(f"""
CREATE OR REPLACE TEMP VIEW users_update AS
SELECT *, current_timestamp() updated
FROM parquet.`{Paths.source}/users/users-30m.parquet`
""")
# COMMAND ----------
load_events_raw()
create_events_update()
create_users_update()
| 28.857143 | 504 | 0.693617 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
import pyspark.sql.functions as F
import re
class BuildEnvironmentVariables:
def __init__(self, username):
self.course_name = "eltsql"
self.source_uri = "wasbs://[email protected]/elt-with-spark-sql/v01"
self.username = username
self.working_dir = f"dbfs:/user/{self.username}/dbacademy/{self.course_name}"
        self.userhome = self.working_dir # TEMPORARY BACKWARDS COMPATIBILITY
clean_username = re.sub("[^a-zA-Z0-9]", "_", self.username)
self.database_name = f"{clean_username}_dbacademy_{self.course_name}"
self.database_location = f"{self.working_dir}/db"
self.source = f"{self.working_dir}/source_datasets"
self.base_path=f"{self.working_dir}/tables"
self.sales_table_path = f"{self.base_path}/sales"
self.users_table_path = f"{self.base_path}/users"
self.events_raw_table_path = f"{self.base_path}/events_raw"
self.events_clean_table_path = f"{self.base_path}/events_clean"
self.transactions_table_path = f"{self.base_path}/transactions"
self.clickpaths_table_path = f"{self.base_path}/clickpaths"
def set_hive_variables(self):
for (k, v) in self.__dict__.items():
spark.sql(f"SET c.{k} = {v}")
def __repr__(self):
return self.__dict__.__repr__().replace(", ", ",\n")
# COMMAND ----------
username = spark.sql("SELECT current_user()").first()[0]
dbacademy_env = BuildEnvironmentVariables(username)
Paths = dbacademy_env # Temporary backwards compatibility
# Hack for backwards compatibility
username = dbacademy_env.username
database = dbacademy_env.database_name
userhome = dbacademy_env.working_dir
print(f"username: {username}")
print(f"database: {database}")
print(f"userhome: {userhome}")
# print(f"dbacademy_env: Databricks Academy configuration object")
# print(f"dbacademy_env.username: {dbacademy_env.username}")
# print(f"dbacademy_env.database_name: {dbacademy_env.database_name}")
# print(f"dbacademy_env.working_dir: {dbacademy_env.working_dir}")
# COMMAND ----------
def path_exists(path):
try:
return len(dbutils.fs.ls(path)) >= 0
except Exception:
return False
dbutils.widgets.text("mode", "default")
mode = dbutils.widgets.get("mode")
if mode == "reset" or mode == "cleanup":
# Drop the database and remove all data for both reset and cleanup
print(f"Removing the database {database}")
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
print(f"Removing previously generated datasets from\n{dbacademy_env.working_dir}")
dbutils.fs.rm(dbacademy_env.working_dir, True)
if mode != "cleanup":
# We are not cleaning up so we want to setup the environment
# RESET is in case we want to force a reset
# not-existing for net-new install
if mode == "reset" or not path_exists(dbacademy_env.source):
print(f"\nInstalling datasets to\n{dbacademy_env.source}")
print(f"""\nNOTE: The datasets that we are installing are located in Washington, USA - depending on the
region that your workspace is in, this operation can take as little as 3 minutes and
upwards to 6 minutes, but this is a one-time operation.""")
dbutils.fs.cp(dbacademy_env.source_uri, dbacademy_env.source, True)
print(f"""\nThe install of the datasets completed successfully.""")
# Create the database and use it.
spark.sql(f"CREATE DATABASE IF NOT EXISTS {dbacademy_env.database_name} LOCATION '{dbacademy_env.database_location}'")
spark.sql(f"USE {dbacademy_env.database_name}")
# Once the database is created, init the hive variables
dbacademy_env.set_hive_variables()
| 37.585859 | 122 | 0.665357 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
import pyspark.sql.functions as F
import re
dbutils.widgets.text("course", "dewd")
course = dbutils.widgets.get("course")
username = spark.sql("SELECT current_user()").collect()[0][0]
userhome = f"dbfs:/user/{username}/{course}"
database = f"""{course}_{re.sub("[^a-zA-Z0-9]", "_", username)}_db"""
print(f"""
username: {username}
userhome: {userhome}
database: {database}""")
spark.sql(f"SET c.username = {username}")
spark.sql(f"SET c.userhome = {userhome}")
spark.sql(f"SET c.database = {database}")
dbutils.widgets.text("mode", "setup")
mode = dbutils.widgets.get("mode")
if mode == "reset":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
spark.sql(f"CREATE DATABASE IF NOT EXISTS {database} LOCATION '{userhome}'")
spark.sql(f"USE {database}")
if mode == "cleanup":
spark.sql(f"DROP DATABASE IF EXISTS {database} CASCADE")
dbutils.fs.rm(userhome, True)
| 28.060606 | 80 | 0.683716 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md # Project Information
# MAGIC
# MAGIC * Name: **Data Engineering with Databricks**
# MAGIC * Version: **beta.2**
# MAGIC * Built On: **Jan 19, 2022 at 14:41:04 UTC**
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 40.695652 | 192 | 0.679541 |
data-engineering-with-databricks | https://github.com/Code360In/data-engineering-with-databricks | null | 6 | 1,141 | 2023-11-22 18:01:10+00:00 | 2022-01-28 23:53:31+00:00 | 1,349 | Creative Commons Zero v1.0 Universal | Python | # Databricks notebook source
# MAGIC %md
# MAGIC # Population Heatmap by State
# MAGIC Using *uszips.csv* as a data source, aggregate the populations by state.
# MAGIC Note: data file provided courtesy of SimpleMaps (https://simplemaps.com/data/us-zips)
# COMMAND ----------
# MAGIC %md
# MAGIC Source a Notebook to configure the table name. If `my_name` resides in a different relative path, then adjust the code in **Cmd 3** accordingly.
# COMMAND ----------
# MAGIC %run ./my_name
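# COMMAND ----------
# The referenced `my_name` notebook is not included here. As a hedged sketch, a minimal
# version might register a table built from uszips.csv and expose its name as a Hive
# variable for the query below -- the table name used here is a placeholder:
# spark.sql("SET conf.name = uszips")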
# COMMAND ----------
# MAGIC %md
# MAGIC Query the table that was named in the `my_name` Notebook. Aggregate the population, grouping by state. For maximum effectiveness, select the **Map** plot to visualize the output.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT `state_id` AS `state`,SUM(`population`) AS `population`
# MAGIC FROM ${conf.name}
# MAGIC WHERE `state_id` NOT IN ('AS','GU','MP','PR','VI')
# MAGIC GROUP BY `state`
| 32.142857 | 186 | 0.686084 |
sfguide-data-engineering-with-snowpark-python | https://github.com/Snowflake-Labs/sfguide-data-engineering-with-snowpark-python | null | 53 | 2,339 | 2023-11-18 06:33:34+00:00 | 2023-01-23 16:06:45+00:00 | 513 | Apache License 2.0 | Python | import sys;
import os;
ignore_folders = ['__pycache__', '.ipynb_checkpoints']
if len(sys.argv) != 2:
print("Root directory is required")
exit()
root_directory = sys.argv[1]
print(f"Deploying all Snowpark apps in root directory {root_directory}")
# Walk the entire directory structure recursively
for (directory_path, directory_names, file_names) in os.walk(root_directory):
# Get just the last/final folder name in the directory path
base_name = os.path.basename(directory_path)
# Skip any folders we want to ignore
if base_name in ignore_folders:
# print(f"Skipping ignored folder {directory_path}")
continue
# An app.toml file in the folder is our indication that this folder contains
# a snowcli Snowpark App
if not "app.toml" in file_names:
# print(f"Skipping non-app folder {directory_path}")
continue
# Next determine what type of app it is
app_type = "unknown"
if "local_connection.py" in file_names:
app_type = "procedure"
else:
app_type = "function"
# Finally deploy the app with the snowcli tool
print(f"Found {app_type} app in folder {directory_path}")
print(f"Calling snowcli to deploy the {app_type} app")
os.chdir(f"{directory_path}")
# snow login will update the app.toml file with the correct path to the snowsql config file
os.system(f"snow login -c {root_directory}/config -C dev")
os.system(f"snow {app_type} create")
| 33.232558 | 95 | 0.681849 |
sfguide-data-engineering-with-snowpark-python | https://github.com/Snowflake-Labs/sfguide-data-engineering-with-snowpark-python | null | 53 | 2,339 | 2023-11-18 06:33:34+00:00 | 2023-01-23 16:06:45+00:00 | 513 | Apache License 2.0 | Python | #------------------------------------------------------------------------------
# Hands-On Lab: Data Engineering with Snowpark
# Script: 02_load_raw.py
# Author: Jeremiah Hansen, Caleb Baechtold
# Last Updated: 1/9/2023
#------------------------------------------------------------------------------
import time
from snowflake.snowpark import Session
#import snowflake.snowpark.types as T
#import snowflake.snowpark.functions as F
POS_TABLES = ['country', 'franchise', 'location', 'menu', 'truck', 'order_header', 'order_detail']
CUSTOMER_TABLES = ['customer_loyalty']
TABLE_DICT = {
"pos": {"schema": "RAW_POS", "tables": POS_TABLES},
"customer": {"schema": "RAW_CUSTOMER", "tables": CUSTOMER_TABLES}
}
# SNOWFLAKE ADVANTAGE: Schema detection
# SNOWFLAKE ADVANTAGE: Data ingestion with COPY
# SNOWFLAKE ADVANTAGE: Snowflake Tables (not file-based)
def load_raw_table(session, tname=None, s3dir=None, year=None, schema=None):
session.use_schema(schema)
if year is None:
location = "@external.frostbyte_raw_stage/{}/{}".format(s3dir, tname)
else:
print('\tLoading year {}'.format(year))
location = "@external.frostbyte_raw_stage/{}/{}/year={}".format(s3dir, tname, year)
# we can infer schema using the parquet read option
df = session.read.option("compression", "snappy") \
.parquet(location)
df.copy_into_table("{}".format(tname))
# SNOWFLAKE ADVANTAGE: Warehouse elasticity (dynamic scaling)
def load_all_raw_tables(session):
_ = session.sql("ALTER WAREHOUSE HOL_WH SET WAREHOUSE_SIZE = XLARGE WAIT_FOR_COMPLETION = TRUE").collect()
for s3dir, data in TABLE_DICT.items():
tnames = data['tables']
schema = data['schema']
for tname in tnames:
print("Loading {}".format(tname))
# Only load the first 3 years of data for the order tables at this point
# We will load the 2022 data later in the lab
if tname in ['order_header', 'order_detail']:
for year in ['2019', '2020', '2021']:
load_raw_table(session, tname=tname, s3dir=s3dir, year=year, schema=schema)
else:
load_raw_table(session, tname=tname, s3dir=s3dir, schema=schema)
_ = session.sql("ALTER WAREHOUSE HOL_WH SET WAREHOUSE_SIZE = XSMALL").collect()
def validate_raw_tables(session):
# check column names from the inferred schema
for tname in POS_TABLES:
print('{}: \n\t{}\n'.format(tname, session.table('RAW_POS.{}'.format(tname)).columns))
for tname in CUSTOMER_TABLES:
print('{}: \n\t{}\n'.format(tname, session.table('RAW_CUSTOMER.{}'.format(tname)).columns))
# For local debugging
if __name__ == "__main__":
# Add the utils package to our path and import the snowpark_utils function
import os, sys
current_dir = os.getcwd()
parent_dir = os.path.dirname(current_dir)
sys.path.append(parent_dir)
from utils import snowpark_utils
session = snowpark_utils.get_snowpark_session()
load_all_raw_tables(session)
# validate_raw_tables(session)
session.close()
| 37.378049 | 110 | 0.617292 |
sfguide-data-engineering-with-snowpark-python | https://github.com/Snowflake-Labs/sfguide-data-engineering-with-snowpark-python | null | 53 | 2,339 | 2023-11-18 06:33:34+00:00 | 2023-01-23 16:06:45+00:00 | 513 | Apache License 2.0 | Python | #------------------------------------------------------------------------------
# Hands-On Lab: Data Engineering with Snowpark
# Script: 04_create_order_view.py
# Author: Jeremiah Hansen, Caleb Baechtold
# Last Updated: 1/9/2023
#------------------------------------------------------------------------------
# SNOWFLAKE ADVANTAGE: Snowpark DataFrame API
# SNOWFLAKE ADVANTAGE: Streams for incremental processing (CDC)
# SNOWFLAKE ADVANTAGE: Streams on views
from snowflake.snowpark import Session
#import snowflake.snowpark.types as T
import snowflake.snowpark.functions as F
def create_pos_view(session):
session.use_schema('HARMONIZED')
order_detail = session.table("RAW_POS.ORDER_DETAIL").select(F.col("ORDER_DETAIL_ID"), \
F.col("LINE_NUMBER"), \
F.col("MENU_ITEM_ID"), \
F.col("QUANTITY"), \
F.col("UNIT_PRICE"), \
F.col("PRICE"), \
F.col("ORDER_ID"))
order_header = session.table("RAW_POS.ORDER_HEADER").select(F.col("ORDER_ID"), \
F.col("TRUCK_ID"), \
F.col("ORDER_TS"), \
F.to_date(F.col("ORDER_TS")).alias("ORDER_TS_DATE"), \
F.col("ORDER_AMOUNT"), \
F.col("ORDER_TAX_AMOUNT"), \
F.col("ORDER_DISCOUNT_AMOUNT"), \
F.col("LOCATION_ID"), \
F.col("ORDER_TOTAL"))
truck = session.table("RAW_POS.TRUCK").select(F.col("TRUCK_ID"), \
F.col("PRIMARY_CITY"), \
F.col("REGION"), \
F.col("COUNTRY"), \
F.col("FRANCHISE_FLAG"), \
F.col("FRANCHISE_ID"))
menu = session.table("RAW_POS.MENU").select(F.col("MENU_ITEM_ID"), \
F.col("TRUCK_BRAND_NAME"), \
F.col("MENU_TYPE"), \
F.col("MENU_ITEM_NAME"))
franchise = session.table("RAW_POS.FRANCHISE").select(F.col("FRANCHISE_ID"), \
F.col("FIRST_NAME").alias("FRANCHISEE_FIRST_NAME"), \
F.col("LAST_NAME").alias("FRANCHISEE_LAST_NAME"))
location = session.table("RAW_POS.LOCATION").select(F.col("LOCATION_ID"))
'''
We can do this one of two ways: either select before the join so it is more explicit, or just join on the full tables.
    The end result is the same, it's mostly a readability question.
'''
# order_detail = session.table("RAW_POS.ORDER_DETAIL")
# order_header = session.table("RAW_POS.ORDER_HEADER")
# truck = session.table("RAW_POS.TRUCK")
# menu = session.table("RAW_POS.MENU")
# franchise = session.table("RAW_POS.FRANCHISE")
# location = session.table("RAW_POS.LOCATION")
t_with_f = truck.join(franchise, truck['FRANCHISE_ID'] == franchise['FRANCHISE_ID'], rsuffix='_f')
oh_w_t_and_l = order_header.join(t_with_f, order_header['TRUCK_ID'] == t_with_f['TRUCK_ID'], rsuffix='_t') \
.join(location, order_header['LOCATION_ID'] == location['LOCATION_ID'], rsuffix='_l')
final_df = order_detail.join(oh_w_t_and_l, order_detail['ORDER_ID'] == oh_w_t_and_l['ORDER_ID'], rsuffix='_oh') \
.join(menu, order_detail['MENU_ITEM_ID'] == menu['MENU_ITEM_ID'], rsuffix='_m')
final_df = final_df.select(F.col("ORDER_ID"), \
F.col("TRUCK_ID"), \
F.col("ORDER_TS"), \
F.col('ORDER_TS_DATE'), \
F.col("ORDER_DETAIL_ID"), \
F.col("LINE_NUMBER"), \
F.col("TRUCK_BRAND_NAME"), \
F.col("MENU_TYPE"), \
F.col("PRIMARY_CITY"), \
F.col("REGION"), \
F.col("COUNTRY"), \
F.col("FRANCHISE_FLAG"), \
F.col("FRANCHISE_ID"), \
F.col("FRANCHISEE_FIRST_NAME"), \
F.col("FRANCHISEE_LAST_NAME"), \
F.col("LOCATION_ID"), \
F.col("MENU_ITEM_ID"), \
F.col("MENU_ITEM_NAME"), \
F.col("QUANTITY"), \
F.col("UNIT_PRICE"), \
F.col("PRICE"), \
F.col("ORDER_AMOUNT"), \
F.col("ORDER_TAX_AMOUNT"), \
F.col("ORDER_DISCOUNT_AMOUNT"), \
F.col("ORDER_TOTAL"))
final_df.create_or_replace_view('POS_FLATTENED_V')
def create_pos_view_stream(session):
session.use_schema('HARMONIZED')
_ = session.sql('CREATE OR REPLACE STREAM POS_FLATTENED_V_STREAM \
ON VIEW POS_FLATTENED_V \
SHOW_INITIAL_ROWS = TRUE').collect()
def test_pos_view(session):
session.use_schema('HARMONIZED')
tv = session.table('POS_FLATTENED_V')
tv.limit(5).show()
# For local debugging
if __name__ == "__main__":
# Add the utils package to our path and import the snowpark_utils function
import os, sys
current_dir = os.getcwd()
parent_dir = os.path.dirname(current_dir)
sys.path.append(parent_dir)
from utils import snowpark_utils
session = snowpark_utils.get_snowpark_session()
create_pos_view(session)
create_pos_view_stream(session)
# test_pos_view(session)
session.close()
| 51.894309 | 122 | 0.421983 |
sfguide-data-engineering-with-snowpark-python | https://github.com/Snowflake-Labs/sfguide-data-engineering-with-snowpark-python | null | 53 | 2,339 | 2023-11-18 06:33:34+00:00 | 2023-01-23 16:06:45+00:00 | 513 | Apache License 2.0 | Python | #------------------------------------------------------------------------------
# Hands-On Lab: Data Engineering with Snowpark
# Script: 07_daily_city_metrics_process_sp/app.py
# Author: Jeremiah Hansen, Caleb Baechtold
# Last Updated: 1/9/2023
#------------------------------------------------------------------------------
import time
from snowflake.snowpark import Session
import snowflake.snowpark.types as T
import snowflake.snowpark.functions as F
def table_exists(session, schema='', name=''):
exists = session.sql("SELECT EXISTS (SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '{}' AND TABLE_NAME = '{}') AS TABLE_EXISTS".format(schema, name)).collect()[0]['TABLE_EXISTS']
return exists
def create_daily_city_metrics_table(session):
SHARED_COLUMNS= [T.StructField("DATE", T.DateType()),
T.StructField("CITY_NAME", T.StringType()),
T.StructField("COUNTRY_DESC", T.StringType()),
T.StructField("DAILY_SALES", T.StringType()),
T.StructField("AVG_TEMPERATURE_FAHRENHEIT", T.DecimalType()),
T.StructField("AVG_TEMPERATURE_CELSIUS", T.DecimalType()),
T.StructField("AVG_PRECIPITATION_INCHES", T.DecimalType()),
T.StructField("AVG_PRECIPITATION_MILLIMETERS", T.DecimalType()),
T.StructField("MAX_WIND_SPEED_100M_MPH", T.DecimalType()),
]
DAILY_CITY_METRICS_COLUMNS = [*SHARED_COLUMNS, T.StructField("META_UPDATED_AT", T.TimestampType())]
DAILY_CITY_METRICS_SCHEMA = T.StructType(DAILY_CITY_METRICS_COLUMNS)
dcm = session.create_dataframe([[None]*len(DAILY_CITY_METRICS_SCHEMA.names)], schema=DAILY_CITY_METRICS_SCHEMA) \
.na.drop() \
.write.mode('overwrite').save_as_table('ANALYTICS.DAILY_CITY_METRICS')
dcm = session.table('ANALYTICS.DAILY_CITY_METRICS')
def merge_daily_city_metrics(session):
_ = session.sql('ALTER WAREHOUSE HOL_WH SET WAREHOUSE_SIZE = XLARGE WAIT_FOR_COMPLETION = TRUE').collect()
print("{} records in stream".format(session.table('HARMONIZED.ORDERS_STREAM').count()))
orders_stream_dates = session.table('HARMONIZED.ORDERS_STREAM').select(F.col("ORDER_TS_DATE").alias("DATE")).distinct()
orders_stream_dates.limit(5).show()
orders = session.table("HARMONIZED.ORDERS_STREAM").group_by(F.col('ORDER_TS_DATE'), F.col('PRIMARY_CITY'), F.col('COUNTRY')) \
.agg(F.sum(F.col("PRICE")).as_("price_nulls")) \
.with_column("DAILY_SALES", F.call_builtin("ZEROIFNULL", F.col("price_nulls"))) \
.select(F.col('ORDER_TS_DATE').alias("DATE"), F.col("PRIMARY_CITY").alias("CITY_NAME"), \
F.col("COUNTRY").alias("COUNTRY_DESC"), F.col("DAILY_SALES"))
# orders.limit(5).show()
weather_pc = session.table("FROSTBYTE_WEATHERSOURCE.ONPOINT_ID.POSTAL_CODES")
countries = session.table("RAW_POS.COUNTRY")
weather = session.table("FROSTBYTE_WEATHERSOURCE.ONPOINT_ID.HISTORY_DAY")
weather = weather.join(weather_pc, (weather['POSTAL_CODE'] == weather_pc['POSTAL_CODE']) & (weather['COUNTRY'] == weather_pc['COUNTRY']), rsuffix='_pc')
weather = weather.join(countries, (weather['COUNTRY'] == countries['ISO_COUNTRY']) & (weather['CITY_NAME'] == countries['CITY']), rsuffix='_c')
weather = weather.join(orders_stream_dates, weather['DATE_VALID_STD'] == orders_stream_dates['DATE'])
weather_agg = weather.group_by(F.col('DATE_VALID_STD'), F.col('CITY_NAME'), F.col('COUNTRY_C')) \
.agg( \
F.avg('AVG_TEMPERATURE_AIR_2M_F').alias("AVG_TEMPERATURE_F"), \
F.avg(F.call_udf("ANALYTICS.FAHRENHEIT_TO_CELSIUS_UDF", F.col("AVG_TEMPERATURE_AIR_2M_F"))).alias("AVG_TEMPERATURE_C"), \
F.avg("TOT_PRECIPITATION_IN").alias("AVG_PRECIPITATION_IN"), \
F.avg(F.call_udf("ANALYTICS.INCH_TO_MILLIMETER_UDF", F.col("TOT_PRECIPITATION_IN"))).alias("AVG_PRECIPITATION_MM"), \
F.max(F.col("MAX_WIND_SPEED_100M_MPH")).alias("MAX_WIND_SPEED_100M_MPH") \
) \
.select(F.col("DATE_VALID_STD").alias("DATE"), F.col("CITY_NAME"), F.col("COUNTRY_C").alias("COUNTRY_DESC"), \
F.round(F.col("AVG_TEMPERATURE_F"), 2).alias("AVG_TEMPERATURE_FAHRENHEIT"), \
F.round(F.col("AVG_TEMPERATURE_C"), 2).alias("AVG_TEMPERATURE_CELSIUS"), \
F.round(F.col("AVG_PRECIPITATION_IN"), 2).alias("AVG_PRECIPITATION_INCHES"), \
F.round(F.col("AVG_PRECIPITATION_MM"), 2).alias("AVG_PRECIPITATION_MILLIMETERS"), \
F.col("MAX_WIND_SPEED_100M_MPH")
)
# weather_agg.limit(5).show()
daily_city_metrics_stg = orders.join(weather_agg, (orders['DATE'] == weather_agg['DATE']) & (orders['CITY_NAME'] == weather_agg['CITY_NAME']) & (orders['COUNTRY_DESC'] == weather_agg['COUNTRY_DESC']), \
how='left', rsuffix='_w') \
.select("DATE", "CITY_NAME", "COUNTRY_DESC", "DAILY_SALES", \
"AVG_TEMPERATURE_FAHRENHEIT", "AVG_TEMPERATURE_CELSIUS", \
"AVG_PRECIPITATION_INCHES", "AVG_PRECIPITATION_MILLIMETERS", \
"MAX_WIND_SPEED_100M_MPH")
# daily_city_metrics_stg.limit(5).show()
cols_to_update = {c: daily_city_metrics_stg[c] for c in daily_city_metrics_stg.schema.names}
metadata_col_to_update = {"META_UPDATED_AT": F.current_timestamp()}
updates = {**cols_to_update, **metadata_col_to_update}
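    # Upsert: update rows matching on (DATE, CITY_NAME, COUNTRY_DESC), insert everything else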
dcm = session.table('ANALYTICS.DAILY_CITY_METRICS')
dcm.merge(daily_city_metrics_stg, (dcm['DATE'] == daily_city_metrics_stg['DATE']) & (dcm['CITY_NAME'] == daily_city_metrics_stg['CITY_NAME']) & (dcm['COUNTRY_DESC'] == daily_city_metrics_stg['COUNTRY_DESC']), \
[F.when_matched().update(updates), F.when_not_matched().insert(updates)])
_ = session.sql('ALTER WAREHOUSE HOL_WH SET WAREHOUSE_SIZE = XSMALL').collect()
def main(session: Session) -> str:
# Create the DAILY_CITY_METRICS table if it doesn't exist
if not table_exists(session, schema='ANALYTICS', name='DAILY_CITY_METRICS'):
create_daily_city_metrics_table(session)
merge_daily_city_metrics(session)
# session.table('ANALYTICS.DAILY_CITY_METRICS').limit(5).show()
return f"Successfully processed DAILY_CITY_METRICS"
# For local debugging
# Be aware you may need to type-convert arguments if you add input parameters
if __name__ == '__main__':
# Add the utils package to our path and import the snowpark_utils function
import os, sys
current_dir = os.getcwd()
parent_parent_dir = os.path.dirname(os.path.dirname(current_dir))
sys.path.append(parent_parent_dir)
from utils import snowpark_utils
session = snowpark_utils.get_snowpark_session()
if len(sys.argv) > 1:
print(main(session, *sys.argv[1:])) # type: ignore
else:
print(main(session)) # type: ignore
session.close()
| 59.910569 | 214 | 0.578294 |
sfguide-data-engineering-with-snowpark-python | https://github.com/Snowflake-Labs/sfguide-data-engineering-with-snowpark-python | null | 53 | 2,339 | 2023-11-18 06:33:34+00:00 | 2023-01-23 16:06:45+00:00 | 513 | Apache License 2.0 | Python | from __future__ import annotations
import os
import configparser
from pathlib import Path
import toml
def get_dev_config(
environment: str = 'dev',
app_config_path: Path = Path.cwd().joinpath('app.toml'),
) -> dict:
try:
app_config = toml.load(app_config_path)
config = configparser.ConfigParser(inline_comment_prefixes="#")
if app_config['snowsql_config_path'].startswith('~'):
config.read(os.path.expanduser(app_config['snowsql_config_path']))
else:
config.read(app_config['snowsql_config_path'])
session_config = config[
'connections.' +
app_config['snowsql_connection_name']
]
session_config_dict = {
k.replace('name', ''): v.strip('"')
for k, v in session_config.items()
}
session_config_dict.update(app_config.get(environment)) # type: ignore
return session_config_dict
except Exception:
raise Exception(
"Error creating snowpark session - be sure you've logged into "
"the SnowCLI and have a valid app.toml file",
)
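# Usage sketch (an assumption, mirroring how sessions are built elsewhere in this repo):
#   from snowflake.snowpark import Session
#   session = Session.builder.configs(get_dev_config()).create()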
| 31.457143 | 79 | 0.602643 |
sfguide-data-engineering-with-snowpark-python | https://github.com/Snowflake-Labs/sfguide-data-engineering-with-snowpark-python | null | 53 | 2,339 | 2023-11-18 06:33:34+00:00 | 2023-01-23 16:06:45+00:00 | 513 | Apache License 2.0 | Python | 0 | 0 | 0 |
|
sfguide-data-engineering-with-snowpark-python | https://github.com/Snowflake-Labs/sfguide-data-engineering-with-snowpark-python | null | 53 | 2,339 | 2023-11-18 06:33:34+00:00 | 2023-01-23 16:06:45+00:00 | 513 | Apache License 2.0 | Python | from snowflake.snowpark import Session
import os
from typing import Optional
# Class to store a singleton connection option
class SnowflakeConnection(object):
_connection = None
@property
def connection(self) -> Optional[Session]:
return type(self)._connection
@connection.setter
def connection(self, val):
type(self)._connection = val
# Function to return a configured Snowpark session
def get_snowpark_session() -> Session:
# if running in snowflake
if SnowflakeConnection().connection:
        # Reuse the singleton session that has already been created (e.g. when running inside Snowflake)
session = SnowflakeConnection().connection
# if running locally with a config file
# TODO: Look for a creds.json style file. This should be the way all snowpark
# related tools work IMO
# if using snowsql config, like snowcli does
elif os.path.exists(os.path.expanduser('~/.snowsql/config')):
snowpark_config = get_snowsql_config()
SnowflakeConnection().connection = Session.builder.configs(snowpark_config).create()
# otherwise configure from environment variables
elif "SNOWSQL_ACCOUNT" in os.environ:
snowpark_config = {
"account": os.environ["SNOWSQL_ACCOUNT"],
"user": os.environ["SNOWSQL_USER"],
"password": os.environ["SNOWSQL_PWD"],
"role": os.environ["SNOWSQL_ROLE"],
"warehouse": os.environ["SNOWSQL_WAREHOUSE"],
"database": os.environ["SNOWSQL_DATABASE"],
"schema": os.environ["SNOWSQL_SCHEMA"]
}
SnowflakeConnection().connection = Session.builder.configs(snowpark_config).create()
if SnowflakeConnection().connection:
return SnowflakeConnection().connection # type: ignore
else:
raise Exception("Unable to create a Snowpark session")
# Mimic the snowcli logic for getting config details, but skip the app.toml processing
# since this will be called outside the snowcli app context.
# TODO: It would be nice to get rid of this entirely and always use creds.json but
# need to update snowcli to make that happen
def get_snowsql_config(
connection_name: str = 'dev',
config_file_path: str = os.path.expanduser('~/.snowsql/config'),
) -> dict:
import configparser
snowsql_to_snowpark_config_mapping = {
'account': 'account',
'accountname': 'account',
'username': 'user',
'password': 'password',
'rolename': 'role',
'warehousename': 'warehouse',
'dbname': 'database',
'schemaname': 'schema'
}
try:
config = configparser.ConfigParser(inline_comment_prefixes="#")
connection_path = 'connections.' + connection_name
config.read(config_file_path)
session_config = config[connection_path]
# Convert snowsql connection variable names to snowcli ones
session_config_dict = {
snowsql_to_snowpark_config_mapping[k]: v.strip('"')
for k, v in session_config.items()
}
return session_config_dict
except Exception:
raise Exception(
"Error getting snowsql config details"
)
| 35.917647 | 92 | 0.654128 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import os
import logging
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from google.cloud import storage
from airflow.providers.google.cloud.operators.bigquery import BigQueryCreateExternalTableOperator
import pyarrow.csv as pv
import pyarrow.parquet as pq
PROJECT_ID = os.environ.get("GCP_PROJECT_ID")
BUCKET = os.environ.get("GCP_GCS_BUCKET")
dataset_file = "yellow_tripdata_2021-01.csv"
dataset_url = f"https://s3.amazonaws.com/nyc-tlc/trip+data/{dataset_file}"
path_to_local_home = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")
parquet_file = dataset_file.replace('.csv', '.parquet')
BIGQUERY_DATASET = os.environ.get("BIGQUERY_DATASET", 'trips_data_all')
def format_to_parquet(src_file):
if not src_file.endswith('.csv'):
logging.error("Can only accept source files in CSV format, for the moment")
return
table = pv.read_csv(src_file)
pq.write_table(table, src_file.replace('.csv', '.parquet'))
# NOTE: takes 20 mins, at an upload speed of 800kbps. Faster if your internet has a better upload speed
def upload_to_gcs(bucket, object_name, local_file):
"""
Ref: https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
:param bucket: GCS bucket name
:param object_name: target path & file-name
:param local_file: source path & file-name
:return:
"""
# WORKAROUND to prevent timeout for files > 6 MB on 800 kbps upload speed.
# (Ref: https://github.com/googleapis/python-storage/issues/74)
storage.blob._MAX_MULTIPART_SIZE = 5 * 1024 * 1024 # 5 MB
storage.blob._DEFAULT_CHUNKSIZE = 5 * 1024 * 1024 # 5 MB
# End of Workaround
client = storage.Client()
bucket = client.bucket(bucket)
blob = bucket.blob(object_name)
blob.upload_from_filename(local_file)
default_args = {
"owner": "airflow",
"start_date": days_ago(1),
"depends_on_past": False,
"retries": 1,
}
# NOTE: DAG declaration - using a Context Manager (an implicit way)
with DAG(
dag_id="data_ingestion_gcs_dag",
schedule_interval="@daily",
default_args=default_args,
catchup=False,
max_active_runs=1,
tags=['dtc-de'],
) as dag:
download_dataset_task = BashOperator(
task_id="download_dataset_task",
bash_command=f"curl -sSL {dataset_url} > {path_to_local_home}/{dataset_file}"
)
format_to_parquet_task = PythonOperator(
task_id="format_to_parquet_task",
python_callable=format_to_parquet,
op_kwargs={
"src_file": f"{path_to_local_home}/{dataset_file}",
},
)
# TODO: Homework - research and try XCOM to communicate output values between 2 tasks/operators
local_to_gcs_task = PythonOperator(
task_id="local_to_gcs_task",
python_callable=upload_to_gcs,
op_kwargs={
"bucket": BUCKET,
"object_name": f"raw/{parquet_file}",
"local_file": f"{path_to_local_home}/{parquet_file}",
},
)
bigquery_external_table_task = BigQueryCreateExternalTableOperator(
task_id="bigquery_external_table_task",
table_resource={
"tableReference": {
"projectId": PROJECT_ID,
"datasetId": BIGQUERY_DATASET,
"tableId": "external_table",
},
"externalDataConfiguration": {
"sourceFormat": "PARQUET",
"sourceUris": [f"gs://{BUCKET}/raw/{parquet_file}"],
},
},
)
download_dataset_task >> format_to_parquet_task >> local_to_gcs_task >> bigquery_external_table_task
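    # A minimal XCom sketch for the TODO above (editor's assumption, not wired into this DAG):
    # format_to_parquet could push the parquet path and the upload callable could pull it
    # instead of re-deriving it from templates, e.g.
    #   def format_to_parquet(src_file, ti):
    #       ...
    #       ti.xcom_push(key="parquet_file", value=src_file.replace(".csv", ".parquet"))
    #   # and in the downstream callable:
    #   parquet_file = ti.xcom_pull(task_ids="format_to_parquet_task", key="parquet_file")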
| 32.423423 | 104 | 0.65031 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import os
from datetime import datetime
from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from ingest_script import ingest_callable
AIRFLOW_HOME = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")
PG_HOST = os.getenv('PG_HOST')
PG_USER = os.getenv('PG_USER')
PG_PASSWORD = os.getenv('PG_PASSWORD')
PG_PORT = os.getenv('PG_PORT')
PG_DATABASE = os.getenv('PG_DATABASE')
local_workflow = DAG(
"LocalIngestionDag",
schedule_interval="0 6 2 * *",
start_date=datetime(2021, 1, 1)
)
URL_PREFIX = 'https://s3.amazonaws.com/nyc-tlc/trip+data'
URL_TEMPLATE = URL_PREFIX + '/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
OUTPUT_FILE_TEMPLATE = AIRFLOW_HOME + '/output_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
TABLE_NAME_TEMPLATE = 'yellow_taxi_{{ execution_date.strftime(\'%Y_%m\') }}'
with local_workflow:
wget_task = BashOperator(
task_id='wget',
bash_command=f'curl -sSL {URL_TEMPLATE} > {OUTPUT_FILE_TEMPLATE}'
)
ingest_task = PythonOperator(
task_id="ingest",
python_callable=ingest_callable,
op_kwargs=dict(
user=PG_USER,
password=PG_PASSWORD,
host=PG_HOST,
port=PG_PORT,
db=PG_DATABASE,
table_name=TABLE_NAME_TEMPLATE,
csv_file=OUTPUT_FILE_TEMPLATE
),
)
wget_task >> ingest_task | 25.327273 | 92 | 0.639945 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import os
from time import time
import pandas as pd
from sqlalchemy import create_engine
def ingest_callable(user, password, host, port, db, table_name, csv_file, execution_date):
print(table_name, csv_file, execution_date)
engine = create_engine(f'postgresql://{user}:{password}@{host}:{port}/{db}')
engine.connect()
print('connection established successfully, inserting data...')
t_start = time()
df_iter = pd.read_csv(csv_file, iterator=True, chunksize=100000)
df = next(df_iter)
df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)
df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime)
df.head(n=0).to_sql(name=table_name, con=engine, if_exists='replace')
df.to_sql(name=table_name, con=engine, if_exists='append')
t_end = time()
print('inserted the first chunk, took %.3f second' % (t_end - t_start))
while True:
t_start = time()
try:
df = next(df_iter)
except StopIteration:
print("completed")
break
df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)
df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime)
df.to_sql(name=table_name, con=engine, if_exists='append')
t_end = time()
print('inserted another chunk, took %.3f second' % (t_end - t_start))
| 27.306122 | 90 | 0.6443 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import os
from datetime import datetime
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from google.cloud import storage
PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "pivotal-surfer-336713")
BUCKET = os.environ.get("GCP_GCS_BUCKET", "dtc_data_lake_pivotal-surfer-336713")
dataset_file = "yellow_tripdata_2021-01.csv"
dataset_url = f"https://s3.amazonaws.com/nyc-tlc/trip+data/{dataset_file}"
path_to_local_home = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")
path_to_creds = f"{path_to_local_home}/google_credentials.json"
default_args = {
"owner": "airflow",
"start_date": days_ago(1),
"depends_on_past": False,
"retries": 1,
}
# # Takes 15-20 mins to run. Good case for using Spark (distributed processing, in place of chunks)
# def upload_to_gcs(bucket, object_name, local_file):
# """
# Ref: https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
# :param bucket: GCS bucket name
# :param object_name: target path & file-name
# :param local_file: source path & file-name
# :return:
# """
# # WORKAROUND to prevent timeout for files > 6 MB on 800 kbps upload link.
# # (Ref: https://github.com/googleapis/python-storage/issues/74)
# storage.blob._MAX_MULTIPART_SIZE = 5 * 1024 * 1024 # 5 MB
# storage.blob._DEFAULT_CHUNKSIZE = 5 * 1024 * 1024 # 5 MB
#
# client = storage.Client()
# bucket = client.bucket(bucket)
#
# blob = bucket.blob(object_name)
# # blob.chunk_size = 5 * 1024 * 1024
# blob.upload_from_filename(local_file)
with DAG(
dag_id="data_ingestion_gcs_dag",
schedule_interval="@daily",
default_args=default_args,
catchup=True,
max_active_runs=1,
) as dag:
# Takes ~2 mins, depending upon your internet's download speed
download_dataset_task = BashOperator(
task_id="download_dataset_task",
bash_command=f"curl -sS {dataset_url} > {path_to_local_home}/{dataset_file}" # "&& unzip {zip_file} && rm {zip_file}"
)
# # APPROACH 1: (takes 20 mins, at an upload speed of 800Kbps. Faster if your internet has a better upload speed)
# upload_to_gcs_task = PythonOperator(
# task_id="upload_to_gcs_task",
# python_callable=upload_to_gcs,
# op_kwargs={
# "bucket": BUCKET,
# "object_name": f"raw/{dataset_file}",
# "local_file": f"{path_to_local_home}/{dataset_file}",
#
# },
# )
# OR APPROACH 2: (takes 20 mins, at an upload speed of 800Kbps. Faster if your internet has a better upload speed)
# Ref: https://cloud.google.com/blog/products/gcp/optimizing-your-cloud-storage-performance-google-cloud-performance-atlas
upload_to_gcs_task = BashOperator(
task_id="upload_to_gcs_task",
bash_command=f"gcloud auth activate-service-account --key-file={path_to_creds} && \
gsutil -m cp {path_to_local_home}/{dataset_file} gs://{BUCKET}",
)
download_dataset_task >> upload_to_gcs_task | 36.421687 | 128 | 0.66087 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import os
import logging
from datetime import datetime
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from google.cloud import storage
import pyarrow.csv as pv
import pyarrow.parquet as pq
PROJECT_ID = os.environ.get("GCP_PROJECT_ID")
BUCKET = os.environ.get("GCP_GCS_BUCKET")
AIRFLOW_HOME = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")
def format_to_parquet(src_file, dest_file):
if not src_file.endswith('.csv'):
logging.error("Can only accept source files in CSV format, for the moment")
return
table = pv.read_csv(src_file)
pq.write_table(table, dest_file)
def upload_to_gcs(bucket, object_name, local_file):
client = storage.Client()
bucket = client.bucket(bucket)
blob = bucket.blob(object_name)
blob.upload_from_filename(local_file)
default_args = {
"owner": "airflow",
#"start_date": days_ago(1),
"depends_on_past": False,
"retries": 1,
}
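# Reusable factory: attaches download -> parquetize -> upload-to-GCS -> cleanup tasks to the given DAG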
def donwload_parquetize_upload_dag(
dag,
url_template,
local_csv_path_template,
local_parquet_path_template,
gcs_path_template
):
with dag:
download_dataset_task = BashOperator(
task_id="download_dataset_task",
bash_command=f"curl -sSLf {url_template} > {local_csv_path_template}"
)
format_to_parquet_task = PythonOperator(
task_id="format_to_parquet_task",
python_callable=format_to_parquet,
op_kwargs={
"src_file": local_csv_path_template,
"dest_file": local_parquet_path_template
},
)
local_to_gcs_task = PythonOperator(
task_id="local_to_gcs_task",
python_callable=upload_to_gcs,
op_kwargs={
"bucket": BUCKET,
"object_name": gcs_path_template,
"local_file": local_parquet_path_template,
},
)
rm_task = BashOperator(
task_id="rm_task",
bash_command=f"rm {local_csv_path_template} {local_parquet_path_template}"
)
download_dataset_task >> format_to_parquet_task >> local_to_gcs_task >> rm_task
URL_PREFIX = 'https://s3.amazonaws.com/nyc-tlc/trip+data'
YELLOW_TAXI_URL_TEMPLATE = URL_PREFIX + '/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
YELLOW_TAXI_CSV_FILE_TEMPLATE = AIRFLOW_HOME + '/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
YELLOW_TAXI_PARQUET_FILE_TEMPLATE = AIRFLOW_HOME + '/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet'
YELLOW_TAXI_GCS_PATH_TEMPLATE = "raw/yellow_tripdata/{{ execution_date.strftime(\'%Y\') }}/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet"
yellow_taxi_data_dag = DAG(
dag_id="yellow_taxi_data_v2",
schedule_interval="0 6 2 * *",
start_date=datetime(2019, 1, 1),
default_args=default_args,
catchup=True,
max_active_runs=3,
tags=['dtc-de'],
)
donwload_parquetize_upload_dag(
dag=yellow_taxi_data_dag,
url_template=YELLOW_TAXI_URL_TEMPLATE,
local_csv_path_template=YELLOW_TAXI_CSV_FILE_TEMPLATE,
local_parquet_path_template=YELLOW_TAXI_PARQUET_FILE_TEMPLATE,
gcs_path_template=YELLOW_TAXI_GCS_PATH_TEMPLATE
)
# https://s3.amazonaws.com/nyc-tlc/trip+data/green_tripdata_2021-01.csv
GREEN_TAXI_URL_TEMPLATE = URL_PREFIX + '/green_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
GREEN_TAXI_CSV_FILE_TEMPLATE = AIRFLOW_HOME + '/green_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
GREEN_TAXI_PARQUET_FILE_TEMPLATE = AIRFLOW_HOME + '/green_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet'
GREEN_TAXI_GCS_PATH_TEMPLATE = "raw/green_tripdata/{{ execution_date.strftime(\'%Y\') }}/green_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet"
green_taxi_data_dag = DAG(
dag_id="green_taxi_data_v1",
schedule_interval="0 7 2 * *",
start_date=datetime(2019, 1, 1),
default_args=default_args,
catchup=True,
max_active_runs=3,
tags=['dtc-de'],
)
donwload_parquetize_upload_dag(
dag=green_taxi_data_dag,
url_template=GREEN_TAXI_URL_TEMPLATE,
local_csv_path_template=GREEN_TAXI_CSV_FILE_TEMPLATE,
local_parquet_path_template=GREEN_TAXI_PARQUET_FILE_TEMPLATE,
gcs_path_template=GREEN_TAXI_GCS_PATH_TEMPLATE
)
# https://nyc-tlc.s3.amazonaws.com/trip+data/fhv_tripdata_2021-01.csv
FHV_TAXI_URL_TEMPLATE = URL_PREFIX + '/fhv_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
FHV_TAXI_CSV_FILE_TEMPLATE = AIRFLOW_HOME + '/fhv_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
FHV_TAXI_PARQUET_FILE_TEMPLATE = AIRFLOW_HOME + '/fhv_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet'
FHV_TAXI_GCS_PATH_TEMPLATE = "raw/fhv_tripdata/{{ execution_date.strftime(\'%Y\') }}/fhv_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet"
fhv_taxi_data_dag = DAG(
dag_id="hfv_taxi_data_v1",
schedule_interval="0 8 2 * *",
start_date=datetime(2019, 1, 1),
end_date=datetime(2020, 1, 1),
default_args=default_args,
catchup=True,
max_active_runs=3,
tags=['dtc-de'],
)
donwload_parquetize_upload_dag(
dag=fhv_taxi_data_dag,
url_template=FHV_TAXI_URL_TEMPLATE,
local_csv_path_template=FHV_TAXI_CSV_FILE_TEMPLATE,
local_parquet_path_template=FHV_TAXI_PARQUET_FILE_TEMPLATE,
gcs_path_template=FHV_TAXI_GCS_PATH_TEMPLATE
)
# https://s3.amazonaws.com/nyc-tlc/misc/taxi+_zone_lookup.csv
ZONES_URL_TEMPLATE = 'https://s3.amazonaws.com/nyc-tlc/misc/taxi+_zone_lookup.csv'
ZONES_CSV_FILE_TEMPLATE = AIRFLOW_HOME + '/taxi_zone_lookup.csv'
ZONES_PARQUET_FILE_TEMPLATE = AIRFLOW_HOME + '/taxi_zone_lookup.parquet'
ZONES_GCS_PATH_TEMPLATE = "raw/taxi_zone/taxi_zone_lookup.parquet"
zones_data_dag = DAG(
dag_id="zones_data_v1",
schedule_interval="@once",
start_date=days_ago(1),
default_args=default_args,
catchup=True,
max_active_runs=3,
tags=['dtc-de'],
)
donwload_parquetize_upload_dag(
dag=zones_data_dag,
url_template=ZONES_URL_TEMPLATE,
local_csv_path_template=ZONES_CSV_FILE_TEMPLATE,
local_parquet_path_template=ZONES_PARQUET_FILE_TEMPLATE,
gcs_path_template=ZONES_GCS_PATH_TEMPLATE
) | 32.393617 | 156 | 0.665127 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import os
import logging
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.providers.google.cloud.operators.bigquery import BigQueryCreateExternalTableOperator, BigQueryInsertJobOperator
from airflow.providers.google.cloud.transfers.gcs_to_gcs import GCSToGCSOperator
PROJECT_ID = os.environ.get("GCP_PROJECT_ID")
BUCKET = os.environ.get("GCP_GCS_BUCKET")
path_to_local_home = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")
BIGQUERY_DATASET = os.environ.get("BIGQUERY_DATASET", 'trips_data_all')
DATASET = "tripdata"
COLOUR_RANGE = {'yellow': 'tpep_pickup_datetime', 'green': 'lpep_pickup_datetime'}
INPUT_PART = "raw"
INPUT_FILETYPE = "parquet"
default_args = {
"owner": "airflow",
"start_date": days_ago(1),
"depends_on_past": False,
"retries": 1,
}
# NOTE: DAG declaration - using a Context Manager (an implicit way)
with DAG(
dag_id="gcs_2_bq_dag",
schedule_interval="@daily",
default_args=default_args,
catchup=False,
max_active_runs=1,
tags=['dtc-de'],
) as dag:
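    # Build one chain per taxi colour: move raw files -> create external table -> create partitioned table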
for colour, ds_col in COLOUR_RANGE.items():
move_files_gcs_task = GCSToGCSOperator(
task_id=f'move_{colour}_{DATASET}_files_task',
source_bucket=BUCKET,
source_object=f'{INPUT_PART}/{colour}_{DATASET}*.{INPUT_FILETYPE}',
destination_bucket=BUCKET,
destination_object=f'{colour}/{colour}_{DATASET}',
move_object=True
)
bigquery_external_table_task = BigQueryCreateExternalTableOperator(
task_id=f"bq_{colour}_{DATASET}_external_table_task",
table_resource={
"tableReference": {
"projectId": PROJECT_ID,
"datasetId": BIGQUERY_DATASET,
"tableId": f"{colour}_{DATASET}_external_table",
},
"externalDataConfiguration": {
"autodetect": "True",
"sourceFormat": f"{INPUT_FILETYPE.upper()}",
"sourceUris": [f"gs://{BUCKET}/{colour}/*"],
},
},
)
CREATE_BQ_TBL_QUERY = (
f"CREATE OR REPLACE TABLE {BIGQUERY_DATASET}.{colour}_{DATASET} \
PARTITION BY DATE({ds_col}) \
AS \
SELECT * FROM {BIGQUERY_DATASET}.{colour}_{DATASET}_external_table;"
)
# Create a partitioned table from external table
bq_create_partitioned_table_job = BigQueryInsertJobOperator(
task_id=f"bq_create_{colour}_{DATASET}_partitioned_table_task",
configuration={
"query": {
"query": CREATE_BQ_TBL_QUERY,
"useLegacySql": False,
}
}
)
move_files_gcs_task >> bigquery_external_table_task >> bq_create_partitioned_table_job
| 33.890244 | 124 | 0.58951 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from confluent_kafka import Producer
import argparse
import csv
from typing import Dict
from time import sleep
from settings import CONFLUENT_CLOUD_CONFIG, \
GREEN_TAXI_TOPIC, FHV_TAXI_TOPIC, \
GREEN_TRIP_DATA_PATH, FHV_TRIP_DATA_PATH
class RideCSVProducer:
def __init__(self, probs: Dict, ride_type: str):
self.producer = Producer(**probs)
self.ride_type = ride_type
def parse_row(self, row):
if self.ride_type == 'green':
record = f'{row[5]}, {row[6]}' # PULocationID, DOLocationID
key = str(row[0]) # vendor_id
elif self.ride_type == 'fhv':
record = f'{row[3]}, {row[4]}' # PULocationID, DOLocationID,
key = str(row[0]) # dispatching_base_num
return key, record
def read_records(self, resource_path: str):
records, ride_keys = [], []
with open(resource_path, 'r') as f:
reader = csv.reader(f)
header = next(reader) # skip the header
for row in reader:
key, record = self.parse_row(row)
ride_keys.append(key)
records.append(record)
return zip(ride_keys, records)
def publish(self, records: [str, str], topic: str):
for key_value in records:
key, value = key_value
try:
self.producer.poll(0)
self.producer.produce(topic=topic, key=key, value=value)
print(f"Producing record for <key: {key}, value:{value}>")
except KeyboardInterrupt:
break
except BufferError as bfer:
self.producer.poll(0.1)
except Exception as e:
print(f"Exception while producing record - {value}: {e}")
self.producer.flush()
sleep(10)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Kafka Consumer')
parser.add_argument('--type', type=str, default='green')
args = parser.parse_args()
if args.type == 'green':
kafka_topic = GREEN_TAXI_TOPIC
data_path = GREEN_TRIP_DATA_PATH
elif args.type == 'fhv':
kafka_topic = FHV_TAXI_TOPIC
data_path = FHV_TRIP_DATA_PATH
producer = RideCSVProducer(ride_type=args.type, probs=CONFLUENT_CLOUD_CONFIG)
ride_records = producer.read_records(resource_path=data_path)
producer.publish(records=ride_records, topic=kafka_topic)
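# Example runs (an assumption -- the script filename is a placeholder):
#   python producer_confluent.py --type green
#   python producer_confluent.py --type fhv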
| 32.819444 | 81 | 0.587921 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import pyspark.sql.types as T
INPUT_DATA_PATH = '../../resources/rides.csv'
BOOTSTRAP_SERVERS = 'localhost:9092'
TOPIC_WINDOWED_VENDOR_ID_COUNT = 'vendor_counts_windowed'
PRODUCE_TOPIC_RIDES_CSV = CONSUME_TOPIC_RIDES_CSV = 'rides_csv'
RIDE_SCHEMA = T.StructType(
[T.StructField("vendor_id", T.IntegerType()),
T.StructField('tpep_pickup_datetime', T.TimestampType()),
T.StructField('tpep_dropoff_datetime', T.TimestampType()),
T.StructField("passenger_count", T.IntegerType()),
T.StructField("trip_distance", T.FloatType()),
T.StructField("payment_type", T.IntegerType()),
T.StructField("total_amount", T.FloatType()),
])
| 34 | 63 | 0.691265 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from settings import CONFLUENT_CLOUD_CONFIG, GREEN_TAXI_TOPIC, FHV_TAXI_TOPIC, RIDES_TOPIC, ALL_RIDE_SCHEMA
def read_from_kafka(consume_topic: str):
    # Spark Structured Streaming DataFrame: connect to the Kafka topic served by the hosts in the bootstrap.servers option
df_stream = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", CONFLUENT_CLOUD_CONFIG['bootstrap.servers']) \
.option("subscribe", consume_topic) \
.option("startingOffsets", "earliest") \
.option("checkpointLocation", "checkpoint") \
.option("kafka.security.protocol", "SASL_SSL") \
.option("kafka.sasl.mechanism", "PLAIN") \
.option("kafka.sasl.jaas.config",
f"""org.apache.kafka.common.security.plain.PlainLoginModule required username="{CONFLUENT_CLOUD_CONFIG['sasl.username']}" password="{CONFLUENT_CLOUD_CONFIG['sasl.password']}";""") \
.option("failOnDataLoss", False) \
.load()
return df_stream
def parse_rides(df, schema):
""" take a Spark Streaming df and parse value col based on <schema>, return streaming df cols in schema """
assert df.isStreaming is True, "DataFrame doesn't receive streaming data"
df = df.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
# split attributes to nested array in one Column
col = F.split(df['value'], ', ')
# expand col to multiple top-level columns
for idx, field in enumerate(schema):
df = df.withColumn(field.name, col.getItem(idx).cast(field.dataType))
df = df.na.drop()
df.printSchema()
return df.select([field.name for field in schema])
def sink_console(df, output_mode: str = 'complete', processing_time: str = '5 seconds'):
query = df.writeStream \
.outputMode(output_mode) \
.trigger(processingTime=processing_time) \
.format("console") \
.option("truncate", False) \
.start() \
.awaitTermination()
return query # pyspark.sql.streaming.StreamingQuery
def sink_kafka(df, topic, output_mode: str = 'complete'):
query = df.writeStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092") \
.outputMode(output_mode) \
.option("topic", topic) \
.option("checkpointLocation", "checkpoint") \
.option("kafka.security.protocol", "SASL_SSL") \
.option("kafka.sasl.mechanism", "PLAIN") \
.option("kafka.sasl.jaas.config",
f"""org.apache.kafka.common.security.plain.PlainLoginModule required username="{CONFLUENT_CLOUD_CONFIG['sasl.username']}" password="{CONFLUENT_CLOUD_CONFIG['sasl.password']}";""") \
.option("failOnDataLoss", False) \
.start()
return query
def op_groupby(df, column_names):
df_aggregation = df.groupBy(column_names).count()
return df_aggregation
if __name__ == "__main__":
spark = SparkSession.builder.appName('streaming-homework').getOrCreate()
spark.sparkContext.setLogLevel('WARN')
# Step 1: Consume GREEN_TAXI_TOPIC and FHV_TAXI_TOPIC
df_green_rides = read_from_kafka(consume_topic=GREEN_TAXI_TOPIC)
df_fhv_rides = read_from_kafka(consume_topic=FHV_TAXI_TOPIC)
# Step 2: Publish green and fhv rides to RIDES_TOPIC
kafka_sink_green_query = sink_kafka(df=df_green_rides, topic=RIDES_TOPIC, output_mode='append')
kafka_sink_fhv_query = sink_kafka(df=df_fhv_rides, topic=RIDES_TOPIC, output_mode='append')
# Step 3: Read RIDES_TOPIC and parse it in ALL_RIDE_SCHEMA
df_all_rides = read_from_kafka(consume_topic=RIDES_TOPIC)
df_all_rides = parse_rides(df_all_rides, ALL_RIDE_SCHEMA)
# Step 4: Apply Aggregation on the all_rides
df_pu_location_count = op_groupby(df_all_rides, ['PULocationID'])
df_pu_location_count = df_pu_location_count.sort(F.col('count').desc())
# Step 5: Sink Aggregation Streams to Console
console_sink_pu_location = sink_console(df_pu_location_count, output_mode='complete')
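# Note (editor's assumption): reading from Kafka with Structured Streaming requires the
# spark-sql-kafka connector on the classpath, e.g. submitting with
#   --packages org.apache.spark:spark-sql-kafka-0-10_2.12:<your-spark-version>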
| 39.87 | 197 | 0.666911 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | #!/usr/bin/env python
# coding: utf-8
import os
import argparse
from time import time
import pandas as pd
from sqlalchemy import create_engine
def main(params):
user = params.user
password = params.password
host = params.host
port = params.port
db = params.db
table_name = params.table_name
url = params.url
# the backup files are gzipped, and it's important to keep the correct extension
# for pandas to be able to open the file
if url.endswith('.csv.gz'):
csv_name = 'output.csv.gz'
else:
csv_name = 'output.csv'
os.system(f"wget {url} -O {csv_name}")
engine = create_engine(f'postgresql://{user}:{password}@{host}:{port}/{db}')
df_iter = pd.read_csv(csv_name, iterator=True, chunksize=100000)
df = next(df_iter)
df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)
df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime)
df.head(n=0).to_sql(name=table_name, con=engine, if_exists='replace')
df.to_sql(name=table_name, con=engine, if_exists='append')
while True:
try:
t_start = time()
df = next(df_iter)
df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)
df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime)
df.to_sql(name=table_name, con=engine, if_exists='append')
t_end = time()
print('inserted another chunk, took %.3f second' % (t_end - t_start))
except StopIteration:
print("Finished ingesting data into the postgres database")
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Ingest CSV data to Postgres')
parser.add_argument('--user', required=True, help='user name for postgres')
parser.add_argument('--password', required=True, help='password for postgres')
parser.add_argument('--host', required=True, help='host for postgres')
parser.add_argument('--port', required=True, help='port for postgres')
parser.add_argument('--db', required=True, help='database name for postgres')
parser.add_argument('--table_name', required=True, help='name of the table where we will write the results to')
parser.add_argument('--url', required=True, help='url of the csv file')
args = parser.parse_args()
main(args)
| 29.417722 | 115 | 0.642798 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import sys
import pandas as pd
print(sys.argv)
day = sys.argv[1]
# some fancy stuff with pandas
print(f'job finished successfully for day = {day}') | 12.909091 | 51 | 0.723684 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import io
import os
import requests
import pandas as pd
from google.cloud import storage
"""
Pre-reqs:
1. `pip install pandas pyarrow google-cloud-storage`
2. Set GOOGLE_APPLICATION_CREDENTIALS to your project/service-account key
3. Set GCP_GCS_BUCKET as your bucket or change default value of BUCKET
"""
# services = ['fhv','green','yellow']
init_url = 'https://github.com/DataTalksClub/nyc-tlc-data/releases/download/'
# switch out the bucketname
BUCKET = os.environ.get("GCP_GCS_BUCKET", "dtc-data-lake-bucketname")
def upload_to_gcs(bucket, object_name, local_file):
"""
Ref: https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
"""
# # WORKAROUND to prevent timeout for files > 6 MB on 800 kbps upload speed.
# # (Ref: https://github.com/googleapis/python-storage/issues/74)
# storage.blob._MAX_MULTIPART_SIZE = 5 * 1024 * 1024 # 5 MB
# storage.blob._DEFAULT_CHUNKSIZE = 5 * 1024 * 1024 # 5 MB
client = storage.Client()
bucket = client.bucket(bucket)
blob = bucket.blob(object_name)
blob.upload_from_filename(local_file)
def web_to_gcs(year, service):
for i in range(12):
# sets the month part of the file_name string
month = '0'+str(i+1)
month = month[-2:]
# csv file_name
file_name = f"{service}_tripdata_{year}-{month}.csv.gz"
# download it using requests via a pandas df
request_url = f"{init_url}{service}/{file_name}"
r = requests.get(request_url)
open(file_name, 'wb').write(r.content)
print(f"Local: {file_name}")
# read it back into a parquet file
df = pd.read_csv(file_name, compression='gzip')
file_name = file_name.replace('.csv.gz', '.parquet')
df.to_parquet(file_name, engine='pyarrow')
print(f"Parquet: {file_name}")
# upload it to gcs
upload_to_gcs(BUCKET, f"{service}/{file_name}", file_name)
print(f"GCS: {service}/{file_name}")
web_to_gcs('2019', 'green')
web_to_gcs('2020', 'green')
# web_to_gcs('2019', 'yellow')
# web_to_gcs('2020', 'yellow')
| 30.671642 | 93 | 0.642621 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | #!/usr/bin/env python
# coding: utf-8
import argparse
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
parser = argparse.ArgumentParser()
parser.add_argument('--input_green', required=True)
parser.add_argument('--input_yellow', required=True)
parser.add_argument('--output', required=True)
args = parser.parse_args()
input_green = args.input_green
input_yellow = args.input_yellow
output = args.output
spark = SparkSession.builder \
.appName('test') \
.getOrCreate()
df_green = spark.read.parquet(input_green)
df_green = df_green \
.withColumnRenamed('lpep_pickup_datetime', 'pickup_datetime') \
.withColumnRenamed('lpep_dropoff_datetime', 'dropoff_datetime')
df_yellow = spark.read.parquet(input_yellow)
df_yellow = df_yellow \
.withColumnRenamed('tpep_pickup_datetime', 'pickup_datetime') \
.withColumnRenamed('tpep_dropoff_datetime', 'dropoff_datetime')
common_colums = [
'VendorID',
'pickup_datetime',
'dropoff_datetime',
'store_and_fwd_flag',
'RatecodeID',
'PULocationID',
'DOLocationID',
'passenger_count',
'trip_distance',
'fare_amount',
'extra',
'mta_tax',
'tip_amount',
'tolls_amount',
'improvement_surcharge',
'total_amount',
'payment_type',
'congestion_surcharge'
]
df_green_sel = df_green \
.select(common_colums) \
.withColumn('service_type', F.lit('green'))
df_yellow_sel = df_yellow \
.select(common_colums) \
.withColumn('service_type', F.lit('yellow'))
df_trips_data = df_green_sel.unionAll(df_yellow_sel)
df_trips_data.registerTempTable('trips_data')
df_result = spark.sql("""
SELECT
    -- Revenue grouping
PULocationID AS revenue_zone,
date_trunc('month', pickup_datetime) AS revenue_month,
service_type,
-- Revenue calculation
SUM(fare_amount) AS revenue_monthly_fare,
SUM(extra) AS revenue_monthly_extra,
SUM(mta_tax) AS revenue_monthly_mta_tax,
SUM(tip_amount) AS revenue_monthly_tip_amount,
SUM(tolls_amount) AS revenue_monthly_tolls_amount,
SUM(improvement_surcharge) AS revenue_monthly_improvement_surcharge,
SUM(total_amount) AS revenue_monthly_total_amount,
SUM(congestion_surcharge) AS revenue_monthly_congestion_surcharge,
-- Additional calculations
AVG(passenger_count) AS avg_montly_passenger_count,
AVG(trip_distance) AS avg_montly_trip_distance
FROM
trips_data
GROUP BY
1, 2, 3
""")
df_result.coalesce(1) \
.write.parquet(output, mode='overwrite')
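# Example local run (an assumption -- the script name and input/output paths are placeholders):
#   spark-submit spark_sql_job.py \
#       --input_green=data/pq/green/2020/* \
#       --input_yellow=data/pq/yellow/2020/* \
#       --output=data/report/revenue-2020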
| 21.75 | 72 | 0.690224 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | #!/usr/bin/env python
# coding: utf-8
import argparse
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
parser = argparse.ArgumentParser()
parser.add_argument('--input_green', required=True)
parser.add_argument('--input_yellow', required=True)
parser.add_argument('--output', required=True)
args = parser.parse_args()
input_green = args.input_green
input_yellow = args.input_yellow
output = args.output
spark = SparkSession.builder \
.appName('test') \
.getOrCreate()
spark.conf.set('temporaryGcsBucket', 'dataproc-temp-europe-west6-828225226997-fckhkym8')
df_green = spark.read.parquet(input_green)
df_green = df_green \
.withColumnRenamed('lpep_pickup_datetime', 'pickup_datetime') \
.withColumnRenamed('lpep_dropoff_datetime', 'dropoff_datetime')
df_yellow = spark.read.parquet(input_yellow)
df_yellow = df_yellow \
.withColumnRenamed('tpep_pickup_datetime', 'pickup_datetime') \
.withColumnRenamed('tpep_dropoff_datetime', 'dropoff_datetime')
common_colums = [
'VendorID',
'pickup_datetime',
'dropoff_datetime',
'store_and_fwd_flag',
'RatecodeID',
'PULocationID',
'DOLocationID',
'passenger_count',
'trip_distance',
'fare_amount',
'extra',
'mta_tax',
'tip_amount',
'tolls_amount',
'improvement_surcharge',
'total_amount',
'payment_type',
'congestion_surcharge'
]
df_green_sel = df_green \
.select(common_colums) \
.withColumn('service_type', F.lit('green'))
df_yellow_sel = df_yellow \
.select(common_colums) \
.withColumn('service_type', F.lit('yellow'))
df_trips_data = df_green_sel.unionAll(df_yellow_sel)
df_trips_data.registerTempTable('trips_data')
df_result = spark.sql("""
SELECT
    -- Revenue grouping
PULocationID AS revenue_zone,
date_trunc('month', pickup_datetime) AS revenue_month,
service_type,
-- Revenue calculation
SUM(fare_amount) AS revenue_monthly_fare,
SUM(extra) AS revenue_monthly_extra,
SUM(mta_tax) AS revenue_monthly_mta_tax,
SUM(tip_amount) AS revenue_monthly_tip_amount,
SUM(tolls_amount) AS revenue_monthly_tolls_amount,
SUM(improvement_surcharge) AS revenue_monthly_improvement_surcharge,
SUM(total_amount) AS revenue_monthly_total_amount,
SUM(congestion_surcharge) AS revenue_monthly_congestion_surcharge,
-- Additional calculations
AVG(passenger_count) AS avg_montly_passenger_count,
AVG(trip_distance) AS avg_montly_trip_distance
FROM
trips_data
GROUP BY
1, 2, 3
""")
df_result.write.format('bigquery') \
.option('table', output) \
.save()
| 22.069565 | 88 | 0.690422 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import argparse
from typing import Dict, List
from kafka import KafkaConsumer
from settings import BOOTSTRAP_SERVERS, CONSUME_TOPIC_RIDES_CSV
class RideCSVConsumer:
def __init__(self, props: Dict):
self.consumer = KafkaConsumer(**props)
def consume_from_kafka(self, topics: List[str]):
self.consumer.subscribe(topics=topics)
print('Consuming from Kafka started')
print('Available topics to consume: ', self.consumer.subscription())
while True:
try:
# SIGINT can't be handled when polling, limit timeout to 1 second.
msg = self.consumer.poll(1.0)
if msg is None or msg == {}:
continue
for msg_key, msg_values in msg.items():
for msg_val in msg_values:
print(f'Key:{msg_val.key}-type({type(msg_val.key)}), '
f'Value:{msg_val.value}-type({type(msg_val.value)})')
except KeyboardInterrupt:
break
self.consumer.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Kafka Consumer')
parser.add_argument('--topic', type=str, default=CONSUME_TOPIC_RIDES_CSV)
args = parser.parse_args()
topic = args.topic
config = {
'bootstrap_servers': [BOOTSTRAP_SERVERS],
'auto_offset_reset': 'earliest',
'enable_auto_commit': True,
'key_deserializer': lambda key: int(key.decode('utf-8')),
'value_deserializer': lambda value: value.decode('utf-8'),
'group_id': 'consumer.group.id.csv-example.1',
}
csv_consumer = RideCSVConsumer(props=config)
csv_consumer.consume_from_kafka(topics=[topic])
| 35.25 | 83 | 0.59632 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import csv
from time import sleep
from typing import Dict, Iterable, Tuple
from kafka import KafkaProducer
from settings import BOOTSTRAP_SERVERS, INPUT_DATA_PATH, PRODUCE_TOPIC_RIDES_CSV
def delivery_report(err, msg):
if err is not None:
print("Delivery failed for record {}: {}".format(msg.key(), err))
return
print('Record {} successfully produced to {} [{}] at offset {}'.format(
msg.key(), msg.topic(), msg.partition(), msg.offset()))
class RideCSVProducer:
def __init__(self, props: Dict):
self.producer = KafkaProducer(**props)
# self.producer = Producer(producer_props)
@staticmethod
def read_records(resource_path: str):
records, ride_keys = [], []
i = 0
with open(resource_path, 'r') as f:
reader = csv.reader(f)
header = next(reader) # skip the header
for row in reader:
                # vendor_id, pickup_datetime, dropoff_datetime, passenger_count, trip_distance, payment_type, total_amount
records.append(f'{row[0]}, {row[1]}, {row[2]}, {row[3]}, {row[4]}, {row[9]}, {row[16]}')
ride_keys.append(str(row[0]))
i += 1
if i == 5:
break
return zip(ride_keys, records)
    def publish(self, topic: str, records: Iterable[Tuple[str, str]]):
for key_value in records:
key, value = key_value
try:
self.producer.send(topic=topic, key=key, value=value)
print(f"Producing record for <key: {key}, value:{value}>")
except KeyboardInterrupt:
break
except Exception as e:
print(f"Exception while producing record - {value}: {e}")
self.producer.flush()
sleep(1)
if __name__ == "__main__":
config = {
'bootstrap_servers': [BOOTSTRAP_SERVERS],
'key_serializer': lambda x: x.encode('utf-8'),
'value_serializer': lambda x: x.encode('utf-8')
}
producer = RideCSVProducer(props=config)
ride_records = producer.read_records(resource_path=INPUT_DATA_PATH)
print(ride_records)
producer.publish(topic=PRODUCE_TOPIC_RIDES_CSV, records=ride_records)
| 33.571429 | 104 | 0.574644 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from typing import List, Dict
class RideRecord:
def __init__(self, arr: List[str]):
self.vendor_id = int(arr[0])
self.passenger_count = int(arr[1])
self.trip_distance = float(arr[2])
self.payment_type = int(arr[3])
self.total_amount = float(arr[4])
@classmethod
def from_dict(cls, d: Dict):
return cls(arr=[
d['vendor_id'],
d['passenger_count'],
d['trip_distance'],
d['payment_type'],
d['total_amount']
]
)
def __repr__(self):
return f'{self.__class__.__name__}: {self.__dict__}'
def dict_to_ride_record(obj, ctx):
if obj is None:
return None
return RideRecord.from_dict(obj)
def ride_record_to_dict(ride_record: RideRecord, ctx):
return ride_record.__dict__
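A short usage sketch for the helpers above, assuming a plain dict of the shape a JSON deserializer would hand back; the values are made up.

sample = {
    'vendor_id': 1,
    'passenger_count': 2,
    'trip_distance': 3.5,
    'payment_type': 1,
    'total_amount': 17.3,
}
record = dict_to_ride_record(sample, ctx=None)         # ctx is unused here
print(record)                                          # RideRecord: {'vendor_id': 1, ...}
assert ride_record_to_dict(record, ctx=None) == sample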
| 21.648649 | 60 | 0.540024 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from typing import Dict
class RideRecordKey:
def __init__(self, vendor_id):
self.vendor_id = vendor_id
@classmethod
def from_dict(cls, d: Dict):
return cls(vendor_id=d['vendor_id'])
def __repr__(self):
return f'{self.__class__.__name__}: {self.__dict__}'
def dict_to_ride_record_key(obj, ctx):
if obj is None:
return None
return RideRecordKey.from_dict(obj)
def ride_record_key_to_dict(ride_record_key: RideRecordKey, ctx):
return ride_record_key.__dict__
| 20.04 | 65 | 0.619048 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from typing import List, Dict
from decimal import Decimal
from datetime import datetime
class Ride:
def __init__(self, arr: List[str]):
self.vendor_id = arr[0]
        self.tpep_pickup_datetime = datetime.strptime(arr[1], "%Y-%m-%d %H:%M:%S")
        self.tpep_dropoff_datetime = datetime.strptime(arr[2], "%Y-%m-%d %H:%M:%S")
self.passenger_count = int(arr[3])
self.trip_distance = Decimal(arr[4])
self.rate_code_id = int(arr[5])
self.store_and_fwd_flag = arr[6]
self.pu_location_id = int(arr[7])
self.do_location_id = int(arr[8])
self.payment_type = arr[9]
self.fare_amount = Decimal(arr[10])
self.extra = Decimal(arr[11])
self.mta_tax = Decimal(arr[12])
self.tip_amount = Decimal(arr[13])
self.tolls_amount = Decimal(arr[14])
self.improvement_surcharge = Decimal(arr[15])
self.total_amount = Decimal(arr[16])
self.congestion_surcharge = Decimal(arr[17])
@classmethod
def from_dict(cls, d: Dict):
return cls(arr=[
d['vendor_id'],
            d['tpep_pickup_datetime'],
            d['tpep_dropoff_datetime'],
d['passenger_count'],
d['trip_distance'],
d['rate_code_id'],
d['store_and_fwd_flag'],
d['pu_location_id'],
d['do_location_id'],
d['payment_type'],
d['fare_amount'],
d['extra'],
d['mta_tax'],
d['tip_amount'],
d['tolls_amount'],
d['improvement_surcharge'],
d['total_amount'],
d['congestion_surcharge'],
]
)
def __repr__(self):
return f'{self.__class__.__name__}: {self.__dict__}'
| 32.396226 | 84 | 0.520068 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import faust
from taxi_rides import TaxiRide
from faust import current_event
app = faust.App('datatalksclub.stream.v3', broker='kafka://localhost:9092', consumer_auto_offset_reset="earliest")
topic = app.topic('datatalkclub.yellow_taxi_ride.json', value_type=TaxiRide)
high_amount_rides = app.topic('datatalks.yellow_taxi_rides.high_amount')
low_amount_rides = app.topic('datatalks.yellow_taxi_rides.low_amount')
@app.agent(topic)
async def process(stream):
async for event in stream:
if event.total_amount >= 40.0:
await current_event().forward(high_amount_rides)
else:
await current_event().forward(low_amount_rides)
if __name__ == '__main__':
app.main()
| 31.318182 | 114 | 0.701408 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import csv
from json import dumps
from kafka import KafkaProducer
from time import sleep
producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
key_serializer=lambda x: dumps(x).encode('utf-8'),
value_serializer=lambda x: dumps(x).encode('utf-8'))
with open('../../resources/rides.csv') as file:
    csvreader = csv.reader(file)
    header = next(csvreader)  # skip the header row
    for row in csvreader:
        key = {"vendorId": int(row[0])}
        value = {"vendorId": int(row[0]), "passenger_count": int(row[3]), "trip_distance": float(row[4]), "payment_type": int(row[9]), "total_amount": float(row[16])}
        producer.send('datatalkclub.yellow_taxi_ride.json', value=value, key=key)
        print("producing")
        sleep(1)
producer.flush() | 36 | 162 | 0.648173 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import faust
from taxi_rides import TaxiRide
app = faust.App('datatalksclub.stream.v2', broker='kafka://localhost:9092')
topic = app.topic('datatalkclub.yellow_taxi_ride.json', value_type=TaxiRide)
@app.agent(topic)
async def start_reading(records):
async for record in records:
print(record)
if __name__ == '__main__':
app.main()
| 19.823529 | 76 | 0.694051 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import faust
from taxi_rides import TaxiRide
app = faust.App('datatalksclub.stream.v2', broker='kafka://localhost:9092')
topic = app.topic('datatalkclub.yellow_taxi_ride.json', value_type=TaxiRide)
vendor_rides = app.Table('vendor_rides', default=int)
@app.agent(topic)
async def process(stream):
async for event in stream.group_by(TaxiRide.vendorId):
vendor_rides[event.vendorId] += 1
if __name__ == '__main__':
app.main()
| 23.833333 | 76 | 0.704036 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | import faust
class TaxiRide(faust.Record, validation=True):
vendorId: str
passenger_count: int
trip_distance: float
payment_type: int
total_amount: float
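A small construction sketch for the record above, guarded so it only runs when the module is executed directly; the field values are made up, and with validation=True Faust should check the annotated types at construction time.

if __name__ == '__main__':
    ride = TaxiRide(
        vendorId='1',
        passenger_count=2,
        trip_distance=3.2,
        payment_type=1,
        total_amount=14.5,
    )
    print(ride.total_amount)   # 14.5
    print(ride.asdict())       # plain dict, convenient for producing JSON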
| 16.7 | 46 | 0.704545 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from datetime import timedelta
import faust
from taxi_rides import TaxiRide
app = faust.App('datatalksclub.stream.v2', broker='kafka://localhost:9092')
topic = app.topic('datatalkclub.yellow_taxi_ride.json', value_type=TaxiRide)
vendor_rides = app.Table('vendor_rides_windowed', default=int).tumbling(
timedelta(minutes=1),
expires=timedelta(hours=1),
)
@app.agent(topic)
async def process(stream):
async for event in stream.group_by(TaxiRide.vendorId):
vendor_rides[event.vendorId] += 1
if __name__ == '__main__':
app.main()
| 23.26087 | 76 | 0.710952 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Python | from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from settings import RIDE_SCHEMA, CONSUME_TOPIC_RIDES_CSV, TOPIC_WINDOWED_VENDOR_ID_COUNT
def read_from_kafka(consume_topic: str):
    # Build a streaming DataFrame that reads the given Kafka topic from the brokers in kafka.bootstrap.servers
df_stream = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "localhost:9092,broker:29092") \
.option("subscribe", consume_topic) \
.option("startingOffsets", "earliest") \
.option("checkpointLocation", "checkpoint") \
.load()
return df_stream
def parse_ride_from_kafka_message(df, schema):
""" take a Spark Streaming df and parse value col based on <schema>, return streaming df cols in schema """
assert df.isStreaming is True, "DataFrame doesn't receive streaming data"
df = df.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
# split attributes to nested array in one Column
col = F.split(df['value'], ', ')
# expand col to multiple top-level columns
for idx, field in enumerate(schema):
df = df.withColumn(field.name, col.getItem(idx).cast(field.dataType))
return df.select([field.name for field in schema])
def sink_console(df, output_mode: str = 'complete', processing_time: str = '5 seconds'):
write_query = df.writeStream \
.outputMode(output_mode) \
.trigger(processingTime=processing_time) \
.format("console") \
.option("truncate", False) \
.start()
return write_query # pyspark.sql.streaming.StreamingQuery
def sink_memory(df, query_name, query_template):
query_df = df \
.writeStream \
.queryName(query_name) \
.format("memory") \
.start()
query_str = query_template.format(table_name=query_name)
query_results = spark.sql(query_str)
return query_results, query_df
def sink_kafka(df, topic):
write_query = df.writeStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "localhost:9092,broker:29092") \
.outputMode('complete') \
.option("topic", topic) \
.option("checkpointLocation", "checkpoint") \
.start()
return write_query
def prepare_df_to_kafka_sink(df, value_columns, key_column=None):
columns = df.columns
df = df.withColumn("value", F.concat_ws(', ', *value_columns))
if key_column:
df = df.withColumnRenamed(key_column, "key")
df = df.withColumn("key", df.key.cast('string'))
return df.select(['key', 'value'])
def op_groupby(df, column_names):
df_aggregation = df.groupBy(column_names).count()
return df_aggregation
def op_windowed_groupby(df, window_duration, slide_duration):
df_windowed_aggregation = df.groupBy(
F.window(timeColumn=df.tpep_pickup_datetime, windowDuration=window_duration, slideDuration=slide_duration),
df.vendor_id
).count()
return df_windowed_aggregation
if __name__ == "__main__":
spark = SparkSession.builder.appName('streaming-examples').getOrCreate()
spark.sparkContext.setLogLevel('WARN')
    # read streaming data from Kafka
df_consume_stream = read_from_kafka(consume_topic=CONSUME_TOPIC_RIDES_CSV)
print(df_consume_stream.printSchema())
# parse streaming data
df_rides = parse_ride_from_kafka_message(df_consume_stream, RIDE_SCHEMA)
print(df_rides.printSchema())
sink_console(df_rides, output_mode='append')
df_trip_count_by_vendor_id = op_groupby(df_rides, ['vendor_id'])
df_trip_count_by_pickup_date_vendor_id = op_windowed_groupby(df_rides, window_duration="10 minutes",
slide_duration='5 minutes')
# write the output out to the console for debugging / testing
sink_console(df_trip_count_by_vendor_id)
# write the output to the kafka topic
df_trip_count_messages = prepare_df_to_kafka_sink(df=df_trip_count_by_pickup_date_vendor_id,
value_columns=['count'], key_column='vendor_id')
kafka_sink_query = sink_kafka(df=df_trip_count_messages, topic=TOPIC_WINDOWED_VENDOR_ID_COUNT)
spark.streams.awaitAnyTermination()
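The streaming job also relies on RIDE_SCHEMA and TOPIC_WINDOWED_VENDOR_ID_COUNT from the settings module. A sketch of what they might look like, assuming the schema mirrors the seven comma-separated fields the CSV producer emits; the names and types are an educated guess rather than the project's actual definitions.

# settings.py (continued) -- hypothetical schema and topic name
import pyspark.sql.types as T

RIDE_SCHEMA = T.StructType([
    T.StructField('vendor_id', T.IntegerType()),
    T.StructField('tpep_pickup_datetime', T.TimestampType()),
    T.StructField('tpep_dropoff_datetime', T.TimestampType()),
    T.StructField('passenger_count', T.IntegerType()),
    T.StructField('trip_distance', T.FloatType()),
    T.StructField('payment_type', T.IntegerType()),
    T.StructField('total_amount', T.FloatType()),
])
TOPIC_WINDOWED_VENDOR_ID_COUNT = 'vendor_counts_windowed'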
| 35.586207 | 115 | 0.65449 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | /**
* Autogenerated by Avro
*
* DO NOT EDIT DIRECTLY
*/
package schemaregistry;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.specific.SpecificData;
import org.apache.avro.util.Utf8;
import org.apache.avro.message.BinaryMessageEncoder;
import org.apache.avro.message.BinaryMessageDecoder;
import org.apache.avro.message.SchemaStore;
@org.apache.avro.specific.AvroGenerated
public class RideRecord extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
private static final long serialVersionUID = 6805437803204402942L;
public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"RideRecord\",\"namespace\":\"schemaregistry\",\"fields\":[{\"name\":\"vendor_id\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"passenger_count\",\"type\":\"int\"},{\"name\":\"trip_distance\",\"type\":\"double\"}]}");
public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
private static final SpecificData MODEL$ = new SpecificData();
private static final BinaryMessageEncoder<RideRecord> ENCODER =
new BinaryMessageEncoder<>(MODEL$, SCHEMA$);
private static final BinaryMessageDecoder<RideRecord> DECODER =
new BinaryMessageDecoder<>(MODEL$, SCHEMA$);
/**
* Return the BinaryMessageEncoder instance used by this class.
* @return the message encoder used by this class
*/
public static BinaryMessageEncoder<RideRecord> getEncoder() {
return ENCODER;
}
/**
* Return the BinaryMessageDecoder instance used by this class.
* @return the message decoder used by this class
*/
public static BinaryMessageDecoder<RideRecord> getDecoder() {
return DECODER;
}
/**
* Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}.
* @param resolver a {@link SchemaStore} used to find schemas by fingerprint
* @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore
*/
public static BinaryMessageDecoder<RideRecord> createDecoder(SchemaStore resolver) {
return new BinaryMessageDecoder<>(MODEL$, SCHEMA$, resolver);
}
/**
* Serializes this RideRecord to a ByteBuffer.
* @return a buffer holding the serialized data for this instance
* @throws java.io.IOException if this instance could not be serialized
*/
public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException {
return ENCODER.encode(this);
}
/**
* Deserializes a RideRecord from a ByteBuffer.
* @param b a byte buffer holding serialized data for an instance of this class
* @return a RideRecord instance decoded from the given buffer
* @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class
*/
public static RideRecord fromByteBuffer(
java.nio.ByteBuffer b) throws java.io.IOException {
return DECODER.decode(b);
}
private java.lang.String vendor_id;
private int passenger_count;
private double trip_distance;
/**
* Default constructor. Note that this does not initialize fields
* to their default values from the schema. If that is desired then
* one should use <code>newBuilder()</code>.
*/
public RideRecord() {}
/**
* All-args constructor.
* @param vendor_id The new value for vendor_id
* @param passenger_count The new value for passenger_count
* @param trip_distance The new value for trip_distance
*/
public RideRecord(java.lang.String vendor_id, java.lang.Integer passenger_count, java.lang.Double trip_distance) {
this.vendor_id = vendor_id;
this.passenger_count = passenger_count;
this.trip_distance = trip_distance;
}
@Override
public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; }
@Override
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
// Used by DatumWriter. Applications should not call.
@Override
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return vendor_id;
case 1: return passenger_count;
case 2: return trip_distance;
default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
}
}
// Used by DatumReader. Applications should not call.
@Override
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: vendor_id = value$ != null ? value$.toString() : null; break;
case 1: passenger_count = (java.lang.Integer)value$; break;
case 2: trip_distance = (java.lang.Double)value$; break;
default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
}
}
/**
* Gets the value of the 'vendor_id' field.
* @return The value of the 'vendor_id' field.
*/
public java.lang.String getVendorId() {
return vendor_id;
}
/**
* Sets the value of the 'vendor_id' field.
* @param value the value to set.
*/
public void setVendorId(java.lang.String value) {
this.vendor_id = value;
}
/**
* Gets the value of the 'passenger_count' field.
* @return The value of the 'passenger_count' field.
*/
public int getPassengerCount() {
return passenger_count;
}
/**
* Sets the value of the 'passenger_count' field.
* @param value the value to set.
*/
public void setPassengerCount(int value) {
this.passenger_count = value;
}
/**
* Gets the value of the 'trip_distance' field.
* @return The value of the 'trip_distance' field.
*/
public double getTripDistance() {
return trip_distance;
}
/**
* Sets the value of the 'trip_distance' field.
* @param value the value to set.
*/
public void setTripDistance(double value) {
this.trip_distance = value;
}
/**
* Creates a new RideRecord RecordBuilder.
* @return A new RideRecord RecordBuilder
*/
public static schemaregistry.RideRecord.Builder newBuilder() {
return new schemaregistry.RideRecord.Builder();
}
/**
* Creates a new RideRecord RecordBuilder by copying an existing Builder.
* @param other The existing builder to copy.
* @return A new RideRecord RecordBuilder
*/
public static schemaregistry.RideRecord.Builder newBuilder(schemaregistry.RideRecord.Builder other) {
if (other == null) {
return new schemaregistry.RideRecord.Builder();
} else {
return new schemaregistry.RideRecord.Builder(other);
}
}
/**
* Creates a new RideRecord RecordBuilder by copying an existing RideRecord instance.
* @param other The existing instance to copy.
* @return A new RideRecord RecordBuilder
*/
public static schemaregistry.RideRecord.Builder newBuilder(schemaregistry.RideRecord other) {
if (other == null) {
return new schemaregistry.RideRecord.Builder();
} else {
return new schemaregistry.RideRecord.Builder(other);
}
}
/**
* RecordBuilder for RideRecord instances.
*/
@org.apache.avro.specific.AvroGenerated
public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<RideRecord>
implements org.apache.avro.data.RecordBuilder<RideRecord> {
private java.lang.String vendor_id;
private int passenger_count;
private double trip_distance;
/** Creates a new Builder */
private Builder() {
super(SCHEMA$, MODEL$);
}
/**
* Creates a Builder by copying an existing Builder.
* @param other The existing Builder to copy.
*/
private Builder(schemaregistry.RideRecord.Builder other) {
super(other);
if (isValidValue(fields()[0], other.vendor_id)) {
this.vendor_id = data().deepCopy(fields()[0].schema(), other.vendor_id);
fieldSetFlags()[0] = other.fieldSetFlags()[0];
}
if (isValidValue(fields()[1], other.passenger_count)) {
this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count);
fieldSetFlags()[1] = other.fieldSetFlags()[1];
}
if (isValidValue(fields()[2], other.trip_distance)) {
this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance);
fieldSetFlags()[2] = other.fieldSetFlags()[2];
}
}
/**
* Creates a Builder by copying an existing RideRecord instance
* @param other The existing instance to copy.
*/
private Builder(schemaregistry.RideRecord other) {
super(SCHEMA$, MODEL$);
if (isValidValue(fields()[0], other.vendor_id)) {
this.vendor_id = data().deepCopy(fields()[0].schema(), other.vendor_id);
fieldSetFlags()[0] = true;
}
if (isValidValue(fields()[1], other.passenger_count)) {
this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count);
fieldSetFlags()[1] = true;
}
if (isValidValue(fields()[2], other.trip_distance)) {
this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance);
fieldSetFlags()[2] = true;
}
}
/**
* Gets the value of the 'vendor_id' field.
* @return The value.
*/
public java.lang.String getVendorId() {
return vendor_id;
}
/**
* Sets the value of the 'vendor_id' field.
* @param value The value of 'vendor_id'.
* @return This builder.
*/
public schemaregistry.RideRecord.Builder setVendorId(java.lang.String value) {
validate(fields()[0], value);
this.vendor_id = value;
fieldSetFlags()[0] = true;
return this;
}
/**
* Checks whether the 'vendor_id' field has been set.
* @return True if the 'vendor_id' field has been set, false otherwise.
*/
public boolean hasVendorId() {
return fieldSetFlags()[0];
}
/**
* Clears the value of the 'vendor_id' field.
* @return This builder.
*/
public schemaregistry.RideRecord.Builder clearVendorId() {
vendor_id = null;
fieldSetFlags()[0] = false;
return this;
}
/**
* Gets the value of the 'passenger_count' field.
* @return The value.
*/
public int getPassengerCount() {
return passenger_count;
}
/**
* Sets the value of the 'passenger_count' field.
* @param value The value of 'passenger_count'.
* @return This builder.
*/
public schemaregistry.RideRecord.Builder setPassengerCount(int value) {
validate(fields()[1], value);
this.passenger_count = value;
fieldSetFlags()[1] = true;
return this;
}
/**
* Checks whether the 'passenger_count' field has been set.
* @return True if the 'passenger_count' field has been set, false otherwise.
*/
public boolean hasPassengerCount() {
return fieldSetFlags()[1];
}
/**
* Clears the value of the 'passenger_count' field.
* @return This builder.
*/
public schemaregistry.RideRecord.Builder clearPassengerCount() {
fieldSetFlags()[1] = false;
return this;
}
/**
* Gets the value of the 'trip_distance' field.
* @return The value.
*/
public double getTripDistance() {
return trip_distance;
}
/**
* Sets the value of the 'trip_distance' field.
* @param value The value of 'trip_distance'.
* @return This builder.
*/
public schemaregistry.RideRecord.Builder setTripDistance(double value) {
validate(fields()[2], value);
this.trip_distance = value;
fieldSetFlags()[2] = true;
return this;
}
/**
* Checks whether the 'trip_distance' field has been set.
* @return True if the 'trip_distance' field has been set, false otherwise.
*/
public boolean hasTripDistance() {
return fieldSetFlags()[2];
}
/**
* Clears the value of the 'trip_distance' field.
* @return This builder.
*/
public schemaregistry.RideRecord.Builder clearTripDistance() {
fieldSetFlags()[2] = false;
return this;
}
@Override
@SuppressWarnings("unchecked")
public RideRecord build() {
try {
RideRecord record = new RideRecord();
record.vendor_id = fieldSetFlags()[0] ? this.vendor_id : (java.lang.String) defaultValue(fields()[0]);
record.passenger_count = fieldSetFlags()[1] ? this.passenger_count : (java.lang.Integer) defaultValue(fields()[1]);
record.trip_distance = fieldSetFlags()[2] ? this.trip_distance : (java.lang.Double) defaultValue(fields()[2]);
return record;
} catch (org.apache.avro.AvroMissingFieldException e) {
throw e;
} catch (java.lang.Exception e) {
throw new org.apache.avro.AvroRuntimeException(e);
}
}
}
@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumWriter<RideRecord>
WRITER$ = (org.apache.avro.io.DatumWriter<RideRecord>)MODEL$.createDatumWriter(SCHEMA$);
@Override public void writeExternal(java.io.ObjectOutput out)
throws java.io.IOException {
WRITER$.write(this, SpecificData.getEncoder(out));
}
@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumReader<RideRecord>
READER$ = (org.apache.avro.io.DatumReader<RideRecord>)MODEL$.createDatumReader(SCHEMA$);
@Override public void readExternal(java.io.ObjectInput in)
throws java.io.IOException {
READER$.read(this, SpecificData.getDecoder(in));
}
@Override protected boolean hasCustomCoders() { return true; }
@Override public void customEncode(org.apache.avro.io.Encoder out)
throws java.io.IOException
{
out.writeString(this.vendor_id);
out.writeInt(this.passenger_count);
out.writeDouble(this.trip_distance);
}
@Override public void customDecode(org.apache.avro.io.ResolvingDecoder in)
throws java.io.IOException
{
org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff();
if (fieldOrder == null) {
this.vendor_id = in.readString();
this.passenger_count = in.readInt();
this.trip_distance = in.readDouble();
} else {
for (int i = 0; i < 3; i++) {
switch (fieldOrder[i].pos()) {
case 0:
this.vendor_id = in.readString();
break;
case 1:
this.passenger_count = in.readInt();
break;
case 2:
this.trip_distance = in.readDouble();
break;
default:
throw new java.io.IOException("Corrupt ResolvingDecoder.");
}
}
}
}
}
| 29.533473 | 377 | 0.659381 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | /**
* Autogenerated by Avro
*
* DO NOT EDIT DIRECTLY
*/
package schemaregistry;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.specific.SpecificData;
import org.apache.avro.util.Utf8;
import org.apache.avro.message.BinaryMessageEncoder;
import org.apache.avro.message.BinaryMessageDecoder;
import org.apache.avro.message.SchemaStore;
@org.apache.avro.specific.AvroGenerated
public class RideRecordCompatible extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
private static final long serialVersionUID = 7163300507090021229L;
public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"RideRecordCompatible\",\"namespace\":\"schemaregistry\",\"fields\":[{\"name\":\"vendorId\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"passenger_count\",\"type\":\"int\"},{\"name\":\"trip_distance\",\"type\":\"double\"},{\"name\":\"pu_location_id\",\"type\":[\"null\",\"long\"],\"default\":null}]}");
public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
private static final SpecificData MODEL$ = new SpecificData();
private static final BinaryMessageEncoder<RideRecordCompatible> ENCODER =
new BinaryMessageEncoder<>(MODEL$, SCHEMA$);
private static final BinaryMessageDecoder<RideRecordCompatible> DECODER =
new BinaryMessageDecoder<>(MODEL$, SCHEMA$);
/**
* Return the BinaryMessageEncoder instance used by this class.
* @return the message encoder used by this class
*/
public static BinaryMessageEncoder<RideRecordCompatible> getEncoder() {
return ENCODER;
}
/**
* Return the BinaryMessageDecoder instance used by this class.
* @return the message decoder used by this class
*/
public static BinaryMessageDecoder<RideRecordCompatible> getDecoder() {
return DECODER;
}
/**
* Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}.
* @param resolver a {@link SchemaStore} used to find schemas by fingerprint
* @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore
*/
public static BinaryMessageDecoder<RideRecordCompatible> createDecoder(SchemaStore resolver) {
return new BinaryMessageDecoder<>(MODEL$, SCHEMA$, resolver);
}
/**
* Serializes this RideRecordCompatible to a ByteBuffer.
* @return a buffer holding the serialized data for this instance
* @throws java.io.IOException if this instance could not be serialized
*/
public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException {
return ENCODER.encode(this);
}
/**
* Deserializes a RideRecordCompatible from a ByteBuffer.
* @param b a byte buffer holding serialized data for an instance of this class
* @return a RideRecordCompatible instance decoded from the given buffer
* @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class
*/
public static RideRecordCompatible fromByteBuffer(
java.nio.ByteBuffer b) throws java.io.IOException {
return DECODER.decode(b);
}
private java.lang.String vendorId;
private int passenger_count;
private double trip_distance;
private java.lang.Long pu_location_id;
/**
* Default constructor. Note that this does not initialize fields
* to their default values from the schema. If that is desired then
* one should use <code>newBuilder()</code>.
*/
public RideRecordCompatible() {}
/**
* All-args constructor.
* @param vendorId The new value for vendorId
* @param passenger_count The new value for passenger_count
* @param trip_distance The new value for trip_distance
* @param pu_location_id The new value for pu_location_id
*/
public RideRecordCompatible(java.lang.String vendorId, java.lang.Integer passenger_count, java.lang.Double trip_distance, java.lang.Long pu_location_id) {
this.vendorId = vendorId;
this.passenger_count = passenger_count;
this.trip_distance = trip_distance;
this.pu_location_id = pu_location_id;
}
@Override
public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; }
@Override
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
// Used by DatumWriter. Applications should not call.
@Override
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return vendorId;
case 1: return passenger_count;
case 2: return trip_distance;
case 3: return pu_location_id;
default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
}
}
// Used by DatumReader. Applications should not call.
@Override
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: vendorId = value$ != null ? value$.toString() : null; break;
case 1: passenger_count = (java.lang.Integer)value$; break;
case 2: trip_distance = (java.lang.Double)value$; break;
case 3: pu_location_id = (java.lang.Long)value$; break;
default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
}
}
/**
* Gets the value of the 'vendorId' field.
* @return The value of the 'vendorId' field.
*/
public java.lang.String getVendorId() {
return vendorId;
}
/**
* Sets the value of the 'vendorId' field.
* @param value the value to set.
*/
public void setVendorId(java.lang.String value) {
this.vendorId = value;
}
/**
* Gets the value of the 'passenger_count' field.
* @return The value of the 'passenger_count' field.
*/
public int getPassengerCount() {
return passenger_count;
}
/**
* Sets the value of the 'passenger_count' field.
* @param value the value to set.
*/
public void setPassengerCount(int value) {
this.passenger_count = value;
}
/**
* Gets the value of the 'trip_distance' field.
* @return The value of the 'trip_distance' field.
*/
public double getTripDistance() {
return trip_distance;
}
/**
* Sets the value of the 'trip_distance' field.
* @param value the value to set.
*/
public void setTripDistance(double value) {
this.trip_distance = value;
}
/**
* Gets the value of the 'pu_location_id' field.
* @return The value of the 'pu_location_id' field.
*/
public java.lang.Long getPuLocationId() {
return pu_location_id;
}
/**
* Sets the value of the 'pu_location_id' field.
* @param value the value to set.
*/
public void setPuLocationId(java.lang.Long value) {
this.pu_location_id = value;
}
/**
* Creates a new RideRecordCompatible RecordBuilder.
* @return A new RideRecordCompatible RecordBuilder
*/
public static schemaregistry.RideRecordCompatible.Builder newBuilder() {
return new schemaregistry.RideRecordCompatible.Builder();
}
/**
* Creates a new RideRecordCompatible RecordBuilder by copying an existing Builder.
* @param other The existing builder to copy.
* @return A new RideRecordCompatible RecordBuilder
*/
public static schemaregistry.RideRecordCompatible.Builder newBuilder(schemaregistry.RideRecordCompatible.Builder other) {
if (other == null) {
return new schemaregistry.RideRecordCompatible.Builder();
} else {
return new schemaregistry.RideRecordCompatible.Builder(other);
}
}
/**
* Creates a new RideRecordCompatible RecordBuilder by copying an existing RideRecordCompatible instance.
* @param other The existing instance to copy.
* @return A new RideRecordCompatible RecordBuilder
*/
public static schemaregistry.RideRecordCompatible.Builder newBuilder(schemaregistry.RideRecordCompatible other) {
if (other == null) {
return new schemaregistry.RideRecordCompatible.Builder();
} else {
return new schemaregistry.RideRecordCompatible.Builder(other);
}
}
/**
* RecordBuilder for RideRecordCompatible instances.
*/
@org.apache.avro.specific.AvroGenerated
public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<RideRecordCompatible>
implements org.apache.avro.data.RecordBuilder<RideRecordCompatible> {
private java.lang.String vendorId;
private int passenger_count;
private double trip_distance;
private java.lang.Long pu_location_id;
/** Creates a new Builder */
private Builder() {
super(SCHEMA$, MODEL$);
}
/**
* Creates a Builder by copying an existing Builder.
* @param other The existing Builder to copy.
*/
private Builder(schemaregistry.RideRecordCompatible.Builder other) {
super(other);
if (isValidValue(fields()[0], other.vendorId)) {
this.vendorId = data().deepCopy(fields()[0].schema(), other.vendorId);
fieldSetFlags()[0] = other.fieldSetFlags()[0];
}
if (isValidValue(fields()[1], other.passenger_count)) {
this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count);
fieldSetFlags()[1] = other.fieldSetFlags()[1];
}
if (isValidValue(fields()[2], other.trip_distance)) {
this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance);
fieldSetFlags()[2] = other.fieldSetFlags()[2];
}
if (isValidValue(fields()[3], other.pu_location_id)) {
this.pu_location_id = data().deepCopy(fields()[3].schema(), other.pu_location_id);
fieldSetFlags()[3] = other.fieldSetFlags()[3];
}
}
/**
* Creates a Builder by copying an existing RideRecordCompatible instance
* @param other The existing instance to copy.
*/
private Builder(schemaregistry.RideRecordCompatible other) {
super(SCHEMA$, MODEL$);
if (isValidValue(fields()[0], other.vendorId)) {
this.vendorId = data().deepCopy(fields()[0].schema(), other.vendorId);
fieldSetFlags()[0] = true;
}
if (isValidValue(fields()[1], other.passenger_count)) {
this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count);
fieldSetFlags()[1] = true;
}
if (isValidValue(fields()[2], other.trip_distance)) {
this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance);
fieldSetFlags()[2] = true;
}
if (isValidValue(fields()[3], other.pu_location_id)) {
this.pu_location_id = data().deepCopy(fields()[3].schema(), other.pu_location_id);
fieldSetFlags()[3] = true;
}
}
/**
* Gets the value of the 'vendorId' field.
* @return The value.
*/
public java.lang.String getVendorId() {
return vendorId;
}
/**
* Sets the value of the 'vendorId' field.
* @param value The value of 'vendorId'.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder setVendorId(java.lang.String value) {
validate(fields()[0], value);
this.vendorId = value;
fieldSetFlags()[0] = true;
return this;
}
/**
* Checks whether the 'vendorId' field has been set.
* @return True if the 'vendorId' field has been set, false otherwise.
*/
public boolean hasVendorId() {
return fieldSetFlags()[0];
}
/**
* Clears the value of the 'vendorId' field.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder clearVendorId() {
vendorId = null;
fieldSetFlags()[0] = false;
return this;
}
/**
* Gets the value of the 'passenger_count' field.
* @return The value.
*/
public int getPassengerCount() {
return passenger_count;
}
/**
* Sets the value of the 'passenger_count' field.
* @param value The value of 'passenger_count'.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder setPassengerCount(int value) {
validate(fields()[1], value);
this.passenger_count = value;
fieldSetFlags()[1] = true;
return this;
}
/**
* Checks whether the 'passenger_count' field has been set.
* @return True if the 'passenger_count' field has been set, false otherwise.
*/
public boolean hasPassengerCount() {
return fieldSetFlags()[1];
}
/**
* Clears the value of the 'passenger_count' field.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder clearPassengerCount() {
fieldSetFlags()[1] = false;
return this;
}
/**
* Gets the value of the 'trip_distance' field.
* @return The value.
*/
public double getTripDistance() {
return trip_distance;
}
/**
* Sets the value of the 'trip_distance' field.
* @param value The value of 'trip_distance'.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder setTripDistance(double value) {
validate(fields()[2], value);
this.trip_distance = value;
fieldSetFlags()[2] = true;
return this;
}
/**
* Checks whether the 'trip_distance' field has been set.
* @return True if the 'trip_distance' field has been set, false otherwise.
*/
public boolean hasTripDistance() {
return fieldSetFlags()[2];
}
/**
* Clears the value of the 'trip_distance' field.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder clearTripDistance() {
fieldSetFlags()[2] = false;
return this;
}
/**
* Gets the value of the 'pu_location_id' field.
* @return The value.
*/
public java.lang.Long getPuLocationId() {
return pu_location_id;
}
/**
* Sets the value of the 'pu_location_id' field.
* @param value The value of 'pu_location_id'.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder setPuLocationId(java.lang.Long value) {
validate(fields()[3], value);
this.pu_location_id = value;
fieldSetFlags()[3] = true;
return this;
}
/**
* Checks whether the 'pu_location_id' field has been set.
* @return True if the 'pu_location_id' field has been set, false otherwise.
*/
public boolean hasPuLocationId() {
return fieldSetFlags()[3];
}
/**
* Clears the value of the 'pu_location_id' field.
* @return This builder.
*/
public schemaregistry.RideRecordCompatible.Builder clearPuLocationId() {
pu_location_id = null;
fieldSetFlags()[3] = false;
return this;
}
@Override
@SuppressWarnings("unchecked")
public RideRecordCompatible build() {
try {
RideRecordCompatible record = new RideRecordCompatible();
record.vendorId = fieldSetFlags()[0] ? this.vendorId : (java.lang.String) defaultValue(fields()[0]);
record.passenger_count = fieldSetFlags()[1] ? this.passenger_count : (java.lang.Integer) defaultValue(fields()[1]);
record.trip_distance = fieldSetFlags()[2] ? this.trip_distance : (java.lang.Double) defaultValue(fields()[2]);
record.pu_location_id = fieldSetFlags()[3] ? this.pu_location_id : (java.lang.Long) defaultValue(fields()[3]);
return record;
} catch (org.apache.avro.AvroMissingFieldException e) {
throw e;
} catch (java.lang.Exception e) {
throw new org.apache.avro.AvroRuntimeException(e);
}
}
}
@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumWriter<RideRecordCompatible>
WRITER$ = (org.apache.avro.io.DatumWriter<RideRecordCompatible>)MODEL$.createDatumWriter(SCHEMA$);
@Override public void writeExternal(java.io.ObjectOutput out)
throws java.io.IOException {
WRITER$.write(this, SpecificData.getEncoder(out));
}
@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumReader<RideRecordCompatible>
READER$ = (org.apache.avro.io.DatumReader<RideRecordCompatible>)MODEL$.createDatumReader(SCHEMA$);
@Override public void readExternal(java.io.ObjectInput in)
throws java.io.IOException {
READER$.read(this, SpecificData.getDecoder(in));
}
@Override protected boolean hasCustomCoders() { return true; }
@Override public void customEncode(org.apache.avro.io.Encoder out)
throws java.io.IOException
{
out.writeString(this.vendorId);
out.writeInt(this.passenger_count);
out.writeDouble(this.trip_distance);
if (this.pu_location_id == null) {
out.writeIndex(0);
out.writeNull();
} else {
out.writeIndex(1);
out.writeLong(this.pu_location_id);
}
}
@Override public void customDecode(org.apache.avro.io.ResolvingDecoder in)
throws java.io.IOException
{
org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff();
if (fieldOrder == null) {
this.vendorId = in.readString();
this.passenger_count = in.readInt();
this.trip_distance = in.readDouble();
if (in.readIndex() != 1) {
in.readNull();
this.pu_location_id = null;
} else {
this.pu_location_id = in.readLong();
}
} else {
for (int i = 0; i < 4; i++) {
switch (fieldOrder[i].pos()) {
case 0:
this.vendorId = in.readString();
break;
case 1:
this.passenger_count = in.readInt();
break;
case 2:
this.trip_distance = in.readDouble();
break;
case 3:
if (in.readIndex() != 1) {
in.readNull();
this.pu_location_id = null;
} else {
this.pu_location_id = in.readLong();
}
break;
default:
throw new java.io.IOException("Corrupt ResolvingDecoder.");
}
}
}
}
}
| 30.317073 | 462 | 0.657858 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | /**
* Autogenerated by Avro
*
* DO NOT EDIT DIRECTLY
*/
package schemaregistry;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.specific.SpecificData;
import org.apache.avro.util.Utf8;
import org.apache.avro.message.BinaryMessageEncoder;
import org.apache.avro.message.BinaryMessageDecoder;
import org.apache.avro.message.SchemaStore;
@org.apache.avro.specific.AvroGenerated
public class RideRecordNoneCompatible extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
private static final long serialVersionUID = -4618980179396772493L;
public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"RideRecordNoneCompatible\",\"namespace\":\"schemaregistry\",\"fields\":[{\"name\":\"vendorId\",\"type\":\"int\"},{\"name\":\"passenger_count\",\"type\":\"int\"},{\"name\":\"trip_distance\",\"type\":\"double\"}]}");
public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
private static final SpecificData MODEL$ = new SpecificData();
private static final BinaryMessageEncoder<RideRecordNoneCompatible> ENCODER =
new BinaryMessageEncoder<>(MODEL$, SCHEMA$);
private static final BinaryMessageDecoder<RideRecordNoneCompatible> DECODER =
new BinaryMessageDecoder<>(MODEL$, SCHEMA$);
/**
* Return the BinaryMessageEncoder instance used by this class.
* @return the message encoder used by this class
*/
public static BinaryMessageEncoder<RideRecordNoneCompatible> getEncoder() {
return ENCODER;
}
/**
* Return the BinaryMessageDecoder instance used by this class.
* @return the message decoder used by this class
*/
public static BinaryMessageDecoder<RideRecordNoneCompatible> getDecoder() {
return DECODER;
}
/**
* Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}.
* @param resolver a {@link SchemaStore} used to find schemas by fingerprint
* @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore
*/
public static BinaryMessageDecoder<RideRecordNoneCompatible> createDecoder(SchemaStore resolver) {
return new BinaryMessageDecoder<>(MODEL$, SCHEMA$, resolver);
}
/**
* Serializes this RideRecordNoneCompatible to a ByteBuffer.
* @return a buffer holding the serialized data for this instance
* @throws java.io.IOException if this instance could not be serialized
*/
public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException {
return ENCODER.encode(this);
}
/**
* Deserializes a RideRecordNoneCompatible from a ByteBuffer.
* @param b a byte buffer holding serialized data for an instance of this class
* @return a RideRecordNoneCompatible instance decoded from the given buffer
* @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class
*/
public static RideRecordNoneCompatible fromByteBuffer(
java.nio.ByteBuffer b) throws java.io.IOException {
return DECODER.decode(b);
}
private int vendorId;
private int passenger_count;
private double trip_distance;
/**
* Default constructor. Note that this does not initialize fields
* to their default values from the schema. If that is desired then
* one should use <code>newBuilder()</code>.
*/
public RideRecordNoneCompatible() {}
/**
* All-args constructor.
* @param vendorId The new value for vendorId
* @param passenger_count The new value for passenger_count
* @param trip_distance The new value for trip_distance
*/
public RideRecordNoneCompatible(java.lang.Integer vendorId, java.lang.Integer passenger_count, java.lang.Double trip_distance) {
this.vendorId = vendorId;
this.passenger_count = passenger_count;
this.trip_distance = trip_distance;
}
@Override
public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; }
@Override
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
// Used by DatumWriter. Applications should not call.
@Override
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return vendorId;
case 1: return passenger_count;
case 2: return trip_distance;
default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
}
}
// Used by DatumReader. Applications should not call.
@Override
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: vendorId = (java.lang.Integer)value$; break;
case 1: passenger_count = (java.lang.Integer)value$; break;
case 2: trip_distance = (java.lang.Double)value$; break;
default: throw new IndexOutOfBoundsException("Invalid index: " + field$);
}
}
/**
* Gets the value of the 'vendorId' field.
* @return The value of the 'vendorId' field.
*/
public int getVendorId() {
return vendorId;
}
/**
* Sets the value of the 'vendorId' field.
* @param value the value to set.
*/
public void setVendorId(int value) {
this.vendorId = value;
}
/**
* Gets the value of the 'passenger_count' field.
* @return The value of the 'passenger_count' field.
*/
public int getPassengerCount() {
return passenger_count;
}
/**
* Sets the value of the 'passenger_count' field.
* @param value the value to set.
*/
public void setPassengerCount(int value) {
this.passenger_count = value;
}
/**
* Gets the value of the 'trip_distance' field.
* @return The value of the 'trip_distance' field.
*/
public double getTripDistance() {
return trip_distance;
}
/**
* Sets the value of the 'trip_distance' field.
* @param value the value to set.
*/
public void setTripDistance(double value) {
this.trip_distance = value;
}
/**
* Creates a new RideRecordNoneCompatible RecordBuilder.
* @return A new RideRecordNoneCompatible RecordBuilder
*/
public static schemaregistry.RideRecordNoneCompatible.Builder newBuilder() {
return new schemaregistry.RideRecordNoneCompatible.Builder();
}
/**
* Creates a new RideRecordNoneCompatible RecordBuilder by copying an existing Builder.
* @param other The existing builder to copy.
* @return A new RideRecordNoneCompatible RecordBuilder
*/
public static schemaregistry.RideRecordNoneCompatible.Builder newBuilder(schemaregistry.RideRecordNoneCompatible.Builder other) {
if (other == null) {
return new schemaregistry.RideRecordNoneCompatible.Builder();
} else {
return new schemaregistry.RideRecordNoneCompatible.Builder(other);
}
}
/**
* Creates a new RideRecordNoneCompatible RecordBuilder by copying an existing RideRecordNoneCompatible instance.
* @param other The existing instance to copy.
* @return A new RideRecordNoneCompatible RecordBuilder
*/
public static schemaregistry.RideRecordNoneCompatible.Builder newBuilder(schemaregistry.RideRecordNoneCompatible other) {
if (other == null) {
return new schemaregistry.RideRecordNoneCompatible.Builder();
} else {
return new schemaregistry.RideRecordNoneCompatible.Builder(other);
}
}
/**
* RecordBuilder for RideRecordNoneCompatible instances.
*/
@org.apache.avro.specific.AvroGenerated
public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<RideRecordNoneCompatible>
implements org.apache.avro.data.RecordBuilder<RideRecordNoneCompatible> {
private int vendorId;
private int passenger_count;
private double trip_distance;
/** Creates a new Builder */
private Builder() {
super(SCHEMA$, MODEL$);
}
/**
* Creates a Builder by copying an existing Builder.
* @param other The existing Builder to copy.
*/
private Builder(schemaregistry.RideRecordNoneCompatible.Builder other) {
super(other);
if (isValidValue(fields()[0], other.vendorId)) {
this.vendorId = data().deepCopy(fields()[0].schema(), other.vendorId);
fieldSetFlags()[0] = other.fieldSetFlags()[0];
}
if (isValidValue(fields()[1], other.passenger_count)) {
this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count);
fieldSetFlags()[1] = other.fieldSetFlags()[1];
}
if (isValidValue(fields()[2], other.trip_distance)) {
this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance);
fieldSetFlags()[2] = other.fieldSetFlags()[2];
}
}
/**
* Creates a Builder by copying an existing RideRecordNoneCompatible instance
* @param other The existing instance to copy.
*/
private Builder(schemaregistry.RideRecordNoneCompatible other) {
super(SCHEMA$, MODEL$);
if (isValidValue(fields()[0], other.vendorId)) {
this.vendorId = data().deepCopy(fields()[0].schema(), other.vendorId);
fieldSetFlags()[0] = true;
}
if (isValidValue(fields()[1], other.passenger_count)) {
this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count);
fieldSetFlags()[1] = true;
}
if (isValidValue(fields()[2], other.trip_distance)) {
this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance);
fieldSetFlags()[2] = true;
}
}
/**
* Gets the value of the 'vendorId' field.
* @return The value.
*/
public int getVendorId() {
return vendorId;
}
/**
* Sets the value of the 'vendorId' field.
* @param value The value of 'vendorId'.
* @return This builder.
*/
public schemaregistry.RideRecordNoneCompatible.Builder setVendorId(int value) {
validate(fields()[0], value);
this.vendorId = value;
fieldSetFlags()[0] = true;
return this;
}
/**
* Checks whether the 'vendorId' field has been set.
* @return True if the 'vendorId' field has been set, false otherwise.
*/
public boolean hasVendorId() {
return fieldSetFlags()[0];
}
/**
* Clears the value of the 'vendorId' field.
* @return This builder.
*/
public schemaregistry.RideRecordNoneCompatible.Builder clearVendorId() {
fieldSetFlags()[0] = false;
return this;
}
/**
* Gets the value of the 'passenger_count' field.
* @return The value.
*/
public int getPassengerCount() {
return passenger_count;
}
/**
* Sets the value of the 'passenger_count' field.
* @param value The value of 'passenger_count'.
* @return This builder.
*/
public schemaregistry.RideRecordNoneCompatible.Builder setPassengerCount(int value) {
validate(fields()[1], value);
this.passenger_count = value;
fieldSetFlags()[1] = true;
return this;
}
/**
* Checks whether the 'passenger_count' field has been set.
* @return True if the 'passenger_count' field has been set, false otherwise.
*/
public boolean hasPassengerCount() {
return fieldSetFlags()[1];
}
/**
* Clears the value of the 'passenger_count' field.
* @return This builder.
*/
public schemaregistry.RideRecordNoneCompatible.Builder clearPassengerCount() {
fieldSetFlags()[1] = false;
return this;
}
/**
* Gets the value of the 'trip_distance' field.
* @return The value.
*/
public double getTripDistance() {
return trip_distance;
}
/**
* Sets the value of the 'trip_distance' field.
* @param value The value of 'trip_distance'.
* @return This builder.
*/
public schemaregistry.RideRecordNoneCompatible.Builder setTripDistance(double value) {
validate(fields()[2], value);
this.trip_distance = value;
fieldSetFlags()[2] = true;
return this;
}
/**
* Checks whether the 'trip_distance' field has been set.
* @return True if the 'trip_distance' field has been set, false otherwise.
*/
public boolean hasTripDistance() {
return fieldSetFlags()[2];
}
/**
* Clears the value of the 'trip_distance' field.
* @return This builder.
*/
public schemaregistry.RideRecordNoneCompatible.Builder clearTripDistance() {
fieldSetFlags()[2] = false;
return this;
}
@Override
@SuppressWarnings("unchecked")
public RideRecordNoneCompatible build() {
try {
RideRecordNoneCompatible record = new RideRecordNoneCompatible();
record.vendorId = fieldSetFlags()[0] ? this.vendorId : (java.lang.Integer) defaultValue(fields()[0]);
record.passenger_count = fieldSetFlags()[1] ? this.passenger_count : (java.lang.Integer) defaultValue(fields()[1]);
record.trip_distance = fieldSetFlags()[2] ? this.trip_distance : (java.lang.Double) defaultValue(fields()[2]);
return record;
} catch (org.apache.avro.AvroMissingFieldException e) {
throw e;
} catch (java.lang.Exception e) {
throw new org.apache.avro.AvroRuntimeException(e);
}
}
}
@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumWriter<RideRecordNoneCompatible>
WRITER$ = (org.apache.avro.io.DatumWriter<RideRecordNoneCompatible>)MODEL$.createDatumWriter(SCHEMA$);
@Override public void writeExternal(java.io.ObjectOutput out)
throws java.io.IOException {
WRITER$.write(this, SpecificData.getEncoder(out));
}
@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumReader<RideRecordNoneCompatible>
READER$ = (org.apache.avro.io.DatumReader<RideRecordNoneCompatible>)MODEL$.createDatumReader(SCHEMA$);
@Override public void readExternal(java.io.ObjectInput in)
throws java.io.IOException {
READER$.read(this, SpecificData.getDecoder(in));
}
@Override protected boolean hasCustomCoders() { return true; }
@Override public void customEncode(org.apache.avro.io.Encoder out)
throws java.io.IOException
{
out.writeInt(this.vendorId);
out.writeInt(this.passenger_count);
out.writeDouble(this.trip_distance);
}
@Override public void customDecode(org.apache.avro.io.ResolvingDecoder in)
throws java.io.IOException
{
org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff();
if (fieldOrder == null) {
this.vendorId = in.readInt();
this.passenger_count = in.readInt();
this.trip_distance = in.readDouble();
} else {
for (int i = 0; i < 3; i++) {
switch (fieldOrder[i].pos()) {
case 0:
this.vendorId = in.readInt();
break;
case 1:
this.passenger_count = in.readInt();
break;
case 2:
this.trip_distance = in.readDouble();
break;
default:
throw new java.io.IOException("Corrupt ResolvingDecoder.");
}
}
}
}
}
| 30.607966 | 344 | 0.675975 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import com.opencsv.CSVReader;
import com.opencsv.exceptions.CsvException;
import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
import io.confluent.kafka.serializers.KafkaAvroSerializer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.streams.StreamsConfig;
import schemaregistry.RideRecord;
import java.io.FileReader;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
public class AvroProducer {
private Properties props = new Properties();
public AvroProducer() {
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class.getName());
props.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "https://psrc-kk5gg.europe-west3.gcp.confluent.cloud");
props.put("basic.auth.credentials.source", "USER_INFO");
props.put("basic.auth.user.info", Secrets.SCHEMA_REGISTRY_KEY+":"+Secrets.SCHEMA_REGISTRY_SECRET);
}
public List<RideRecord> getRides() throws IOException, CsvException {
var ridesStream = this.getClass().getResource("/rides.csv");
var reader = new CSVReader(new FileReader(ridesStream.getFile()));
reader.skip(1);
return reader.readAll().stream().map(row ->
RideRecord.newBuilder()
.setVendorId(row[0])
.setTripDistance(Double.parseDouble(row[4]))
.setPassengerCount(Integer.parseInt(row[3]))
.build()
).collect(Collectors.toList());
}
public void publishRides(List<RideRecord> rides) throws ExecutionException, InterruptedException {
KafkaProducer<String, RideRecord> kafkaProducer = new KafkaProducer<>(props);
for (RideRecord ride : rides) {
var record = kafkaProducer.send(new ProducerRecord<>("rides_avro", String.valueOf(ride.getVendorId()), ride), (metadata, exception) -> {
if (exception != null) {
System.out.println(exception.getMessage());
}
});
System.out.println(record.get().offset());
Thread.sleep(500);
}
}
public static void main(String[] args) throws IOException, CsvException, ExecutionException, InterruptedException {
var producer = new AvroProducer();
var rideRecords = producer.getRides();
producer.publishRides(rideRecords);
}
}
| 44.767123 | 192 | 0.688323 |
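A consumer counterpart for the Avro topic is not included in this row; the sketch below is one minimal way to read `rides_avro` back into the generated `RideRecord` class. It reuses the `Secrets` constants and the Confluent Cloud / Schema Registry endpoints configured in `AvroProducer`; the class name `AvroConsumerSketch` and the consumer group id are illustrative assumptions, not part of the original project.

package org.example;
import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
import io.confluent.kafka.serializers.KafkaAvroDeserializer;
import io.confluent.kafka.serializers.KafkaAvroDeserializerConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import schemaregistry.RideRecord;
import java.time.Duration;
import java.util.List;
import java.util.Properties;
public class AvroConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
        props.put("sasl.mechanism", "PLAIN");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "rides_avro.consumer.sketch.v1"); // assumed group id, not from the original project
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());
        // specific.avro.reader=true makes the deserializer return the generated RideRecord class
        props.put(KafkaAvroDeserializerConfig.SPECIFIC_AVRO_READER_CONFIG, "true");
        props.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "https://psrc-kk5gg.europe-west3.gcp.confluent.cloud");
        props.put("basic.auth.credentials.source", "USER_INFO");
        props.put("basic.auth.user.info", Secrets.SCHEMA_REGISTRY_KEY+":"+Secrets.SCHEMA_REGISTRY_SECRET);
        try (KafkaConsumer<String, RideRecord> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("rides_avro"));
            // a single poll is enough for a smoke test; a real consumer would loop
            for (ConsumerRecord<String, RideRecord> record : consumer.poll(Duration.ofSeconds(5))) {
                System.out.println(record.key() + " -> " + record.value().getTripDistance());
            }
        }
    }
}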
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.example.data.Ride;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.time.temporal.TemporalUnit;
import java.util.List;
import java.util.Properties;
import io.confluent.kafka.serializers.KafkaJsonDeserializerConfig;
public class JsonConsumer {
private Properties props = new Properties();
private KafkaConsumer<String, Ride> consumer;
public JsonConsumer() {
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "io.confluent.kafka.serializers.KafkaJsonDeserializer");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "kafka_tutorial_example.jsonconsumer.v2");
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
props.put(KafkaJsonDeserializerConfig.JSON_VALUE_TYPE, Ride.class);
consumer = new KafkaConsumer<String, Ride>(props);
consumer.subscribe(List.of("rides"));
}
public void consumeFromKafka() {
System.out.println("Consuming form kafka started");
var results = consumer.poll(Duration.of(1, ChronoUnit.SECONDS));
var i = 0;
do {
for(ConsumerRecord<String, Ride> result: results) {
System.out.println(result.value().DOLocationID);
}
results = consumer.poll(Duration.of(1, ChronoUnit.SECONDS));
System.out.println("RESULTS:::" + results.count());
i++;
}
while(!results.isEmpty() || i < 10);
}
public static void main(String[] args) {
JsonConsumer jsonConsumer = new JsonConsumer();
jsonConsumer.consumeFromKafka();
}
}
| 42.631579 | 192 | 0.697104 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;
import org.example.customserdes.CustomSerdes;
import org.example.data.Ride;
import java.util.Properties;
public class JsonKStream {
private Properties props = new Properties();
public JsonKStream() {
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka_tutorial.kstream.count.plocation.v1");
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
}
public Topology createTopology() {
StreamsBuilder streamsBuilder = new StreamsBuilder();
var ridesStream = streamsBuilder.stream("rides", Consumed.with(Serdes.String(), CustomSerdes.getSerde(Ride.class)));
var puLocationCount = ridesStream.groupByKey().count().toStream();
puLocationCount.to("rides-pulocation-count", Produced.with(Serdes.String(), Serdes.Long()));
return streamsBuilder.build();
}
public void countPLocation() throws InterruptedException {
var topology = createTopology();
var kStreams = new KafkaStreams(topology, props);
kStreams.start();
while (kStreams.state() != KafkaStreams.State.RUNNING) {
System.out.println(kStreams.state());
Thread.sleep(1000);
}
System.out.println(kStreams.state());
Runtime.getRuntime().addShutdownHook(new Thread(kStreams::close));
}
public static void main(String[] args) throws InterruptedException {
var object = new JsonKStream();
object.countPLocation();
}
}
| 42.175439 | 192 | 0.707724 |
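The counts that `JsonKStream` writes to `rides-pulocation-count` are plain String keys and Long values, so they can be inspected with an ordinary consumer. A minimal sketch, assuming the same cluster properties as the other examples; the class name and group id are illustrative.

package org.example;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.List;
import java.util.Properties;
public class PULocationCountReaderSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
        props.put("sasl.mechanism", "PLAIN");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "rides.pulocation.count.reader.sketch"); // assumed group id
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        try (KafkaConsumer<String, Long> consumer =
                     new KafkaConsumer<>(props, new StringDeserializer(), new LongDeserializer())) {
            consumer.subscribe(List.of("rides-pulocation-count"));
            for (ConsumerRecord<String, Long> record : consumer.poll(Duration.ofSeconds(5))) {
                System.out.println("location " + record.key() + " count=" + record.value());
            }
        }
    }
}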
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler;
import org.apache.kafka.streams.kstream.*;
import org.example.customserdes.CustomSerdes;
import org.example.data.PickupLocation;
import org.example.data.Ride;
import org.example.data.VendorInfo;
import java.time.Duration;
import java.util.Optional;
import java.util.Properties;
public class JsonKStreamJoins {
private Properties props = new Properties();
public JsonKStreamJoins() {
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka_tutorial.kstream.joined.rides.pickuplocation.v1");
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
}
public Topology createTopology() {
StreamsBuilder streamsBuilder = new StreamsBuilder();
KStream<String, Ride> rides = streamsBuilder.stream(Topics.INPUT_RIDE_TOPIC, Consumed.with(Serdes.String(), CustomSerdes.getSerde(Ride.class)));
KStream<String, PickupLocation> pickupLocations = streamsBuilder.stream(Topics.INPUT_RIDE_LOCATION_TOPIC, Consumed.with(Serdes.String(), CustomSerdes.getSerde(PickupLocation.class)));
var pickupLocationsKeyedOnPUId = pickupLocations.selectKey((key, value) -> String.valueOf(value.PULocationID));
var joined = rides.join(pickupLocationsKeyedOnPUId, (ValueJoiner<Ride, PickupLocation, Optional<VendorInfo>>) (ride, pickupLocation) -> {
var period = Duration.between(ride.tpep_dropoff_datetime, pickupLocation.tpep_pickup_datetime);
if (period.abs().toMinutes() > 10) return Optional.empty();
else return Optional.of(new VendorInfo(ride.VendorID, pickupLocation.PULocationID, pickupLocation.tpep_pickup_datetime, ride.tpep_dropoff_datetime));
}, JoinWindows.ofTimeDifferenceAndGrace(Duration.ofMinutes(20), Duration.ofMinutes(5)),
StreamJoined.with(Serdes.String(), CustomSerdes.getSerde(Ride.class), CustomSerdes.getSerde(PickupLocation.class)));
joined.filter(((key, value) -> value.isPresent())).mapValues(Optional::get)
.to(Topics.OUTPUT_TOPIC, Produced.with(Serdes.String(), CustomSerdes.getSerde(VendorInfo.class)));
return streamsBuilder.build();
}
public void joinRidesPickupLocation() throws InterruptedException {
var topology = createTopology();
var kStreams = new KafkaStreams(topology, props);
kStreams.setUncaughtExceptionHandler(exception -> {
System.out.println(exception.getMessage());
return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_APPLICATION;
});
kStreams.start();
while (kStreams.state() != KafkaStreams.State.RUNNING) {
System.out.println(kStreams.state());
Thread.sleep(1000);
}
System.out.println(kStreams.state());
Runtime.getRuntime().addShutdownHook(new Thread(kStreams::close));
}
public static void main(String[] args) throws InterruptedException {
var object = new JsonKStreamJoins();
object.joinRidesPickupLocation();
}
}
| 50.922078 | 192 | 0.718039 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.WindowedSerdes;
import org.example.customserdes.CustomSerdes;
import org.example.data.Ride;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Properties;
public class JsonKStreamWindow {
private Properties props = new Properties();
public JsonKStreamWindow() {
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka_tutorial.kstream.count.plocation.v1");
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
}
public Topology createTopology() {
StreamsBuilder streamsBuilder = new StreamsBuilder();
var ridesStream = streamsBuilder.stream("rides", Consumed.with(Serdes.String(), CustomSerdes.getSerde(Ride.class)));
var puLocationCount = ridesStream.groupByKey()
.windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofSeconds(10), Duration.ofSeconds(5)))
.count().toStream();
var windowSerde = WindowedSerdes.timeWindowedSerdeFrom(String.class, 10*1000);
puLocationCount.to("rides-pulocation-window-count", Produced.with(windowSerde, Serdes.Long()));
return streamsBuilder.build();
}
public void countPLocationWindowed() {
var topology = createTopology();
var kStreams = new KafkaStreams(topology, props);
kStreams.start();
Runtime.getRuntime().addShutdownHook(new Thread(kStreams::close));
}
public static void main(String[] args) {
var object = new JsonKStreamWindow();
object.countPLocationWindowed();
}
}
| 41.983607 | 192 | 0.724151 |
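Reading the windowed counts back is less obvious because the record key is a `Windowed<String>` written with the 10-second window serde above. A minimal sketch, assuming the same cluster properties; the class name and group id are illustrative.

package org.example;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.streams.kstream.TimeWindowedDeserializer;
import org.apache.kafka.streams.kstream.Windowed;
import java.time.Duration;
import java.util.List;
import java.util.Properties;
public class WindowedCountReaderSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
        props.put("sasl.mechanism", "PLAIN");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "rides.pulocation.window.reader.sketch"); // assumed group id
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // the key deserializer must be told the window size used by the topology (10 seconds here)
        var keyDeserializer = new TimeWindowedDeserializer<>(new StringDeserializer(), 10 * 1000L);
        try (KafkaConsumer<Windowed<String>, Long> consumer =
                     new KafkaConsumer<>(props, keyDeserializer, new LongDeserializer())) {
            consumer.subscribe(List.of("rides-pulocation-window-count"));
            for (ConsumerRecord<Windowed<String>, Long> record : consumer.poll(Duration.ofSeconds(5))) {
                System.out.println(record.key().key() + " [" + record.key().window().start()
                        + " - " + record.key().window().end() + "] count=" + record.value());
            }
        }
    }
}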
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import com.opencsv.CSVReader;
import com.opencsv.exceptions.CsvException;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.streams.StreamsConfig;
import org.example.data.Ride;
import java.io.FileReader;
import java.io.IOException;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
public class JsonProducer {
private Properties props = new Properties();
public JsonProducer() {
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "io.confluent.kafka.serializers.KafkaJsonSerializer");
}
public List<Ride> getRides() throws IOException, CsvException {
var ridesStream = this.getClass().getResource("/rides.csv");
var reader = new CSVReader(new FileReader(ridesStream.getFile()));
reader.skip(1);
return reader.readAll().stream().map(arr -> new Ride(arr))
.collect(Collectors.toList());
}
public void publishRides(List<Ride> rides) throws ExecutionException, InterruptedException {
KafkaProducer<String, Ride> kafkaProducer = new KafkaProducer<String, Ride>(props);
for(Ride ride: rides) {
ride.tpep_pickup_datetime = LocalDateTime.now().minusMinutes(20);
ride.tpep_dropoff_datetime = LocalDateTime.now();
var record = kafkaProducer.send(new ProducerRecord<>("rides", String.valueOf(ride.DOLocationID), ride), (metadata, exception) -> {
if(exception != null) {
System.out.println(exception.getMessage());
}
});
System.out.println(record.get().offset());
System.out.println(ride.DOLocationID);
Thread.sleep(500);
}
}
public static void main(String[] args) throws IOException, CsvException, ExecutionException, InterruptedException {
var producer = new JsonProducer();
var rides = producer.getRides();
producer.publishRides(rides);
}
} | 44.278689 | 192 | 0.682361 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import com.opencsv.exceptions.CsvException;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.example.data.PickupLocation;
import java.io.IOException;
import java.time.LocalDateTime;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
public class JsonProducerPickupLocation {
private Properties props = new Properties();
public JsonProducerPickupLocation() {
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
props.put("security.protocol", "SASL_SSL");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
props.put("sasl.mechanism", "PLAIN");
props.put("client.dns.lookup", "use_all_dns_ips");
props.put("session.timeout.ms", "45000");
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "io.confluent.kafka.serializers.KafkaJsonSerializer");
}
public void publish(PickupLocation pickupLocation) throws ExecutionException, InterruptedException {
KafkaProducer<String, PickupLocation> kafkaProducer = new KafkaProducer<String, PickupLocation>(props);
var record = kafkaProducer.send(new ProducerRecord<>("rides_location", String.valueOf(pickupLocation.PULocationID), pickupLocation), (metadata, exception) -> {
if (exception != null) {
System.out.println(exception.getMessage());
}
});
System.out.println(record.get().offset());
}
public static void main(String[] args) throws IOException, CsvException, ExecutionException, InterruptedException {
var producer = new JsonProducerPickupLocation();
producer.publish(new PickupLocation(186, LocalDateTime.now()));
}
}
| 47.577778 | 192 | 0.730892 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
public class Secrets {
public static final String KAFKA_CLUSTER_KEY = "REPLACE_WITH_YOUR_KAFKA_CLUSTER_KEY";
public static final String KAFKA_CLUSTER_SECRET = "REPLACE_WITH_YOUR_KAFKA_CLUSTER_SECRET";
public static final String SCHEMA_REGISTRY_KEY = "REPLACE_WITH_SCHEMA_REGISTRY_KEY";
public static final String SCHEMA_REGISTRY_SECRET = "REPLACE_WITH_SCHEMA_REGISTRY_SECRET";
}
| 37.181818 | 95 | 0.761337 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
public class Topics {
public static final String INPUT_RIDE_TOPIC = "rides";
public static final String INPUT_RIDE_LOCATION_TOPIC = "rides_location";
public static final String OUTPUT_TOPIC = "vendor_info";
}
| 29.5 | 76 | 0.73251 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example.customserdes;
import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
import io.confluent.kafka.serializers.KafkaJsonDeserializer;
import io.confluent.kafka.serializers.KafkaJsonSerializer;
import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde;
import org.apache.avro.specific.SpecificRecordBase;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.Serializer;
import org.example.data.PickupLocation;
import org.example.data.Ride;
import org.example.data.VendorInfo;
import java.util.HashMap;
import java.util.Map;
public class CustomSerdes {
public static <T> Serde<T> getSerde(Class<T> classOf) {
Map<String, Object> serdeProps = new HashMap<>();
serdeProps.put("json.value.type", classOf);
final Serializer<T> mySerializer = new KafkaJsonSerializer<>();
mySerializer.configure(serdeProps, false);
final Deserializer<T> myDeserializer = new KafkaJsonDeserializer<>();
myDeserializer.configure(serdeProps, false);
return Serdes.serdeFrom(mySerializer, myDeserializer);
}
    public static <T extends SpecificRecordBase> SpecificAvroSerde<T> getAvroSerde(boolean isKey, String schemaRegistryUrl) {
var serde = new SpecificAvroSerde<T>();
Map<String, Object> serdeProps = new HashMap<>();
serdeProps.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, schemaRegistryUrl);
serde.configure(serdeProps, isKey);
return serde;
}
}
| 37.325581 | 122 | 0.763206 |
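`CustomSerdes.getAvroSerde` is not exercised by the surrounding files; a typical wiring into a Streams topology might look like the sketch below. It assumes the generated `RideRecord` class and the Schema Registry URL configured in `AvroProducer`; against Confluent Cloud the serde would additionally need the basic-auth properties, and the class name is illustrative.

package org.example;
import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.example.customserdes.CustomSerdes;
import schemaregistry.RideRecord;
public class AvroSerdeUsageSketch {
    public static void main(String[] args) {
        // configure a value serde for the generated Avro class
        SpecificAvroSerde<RideRecord> rideSerde =
                CustomSerdes.getAvroSerde(false, "https://psrc-kk5gg.europe-west3.gcp.confluent.cloud");
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("rides_avro", Consumed.with(Serdes.String(), rideSerde))
               .foreach((key, ride) -> System.out.println(key + " -> " + ride.getTripDistance()));
        // building the topology needs no broker connection, so this runs as an offline smoke test
        System.out.println(builder.build().describe());
    }
}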
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example.data;
import java.time.LocalDateTime;
public class PickupLocation {
public PickupLocation(long PULocationID, LocalDateTime tpep_pickup_datetime) {
this.PULocationID = PULocationID;
this.tpep_pickup_datetime = tpep_pickup_datetime;
}
public PickupLocation() {
}
public long PULocationID;
public LocalDateTime tpep_pickup_datetime;
}
| 22.352941 | 82 | 0.724747 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example.data;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
public class Ride {
public Ride(String[] arr) {
VendorID = arr[0];
tpep_pickup_datetime = LocalDateTime.parse(arr[1], DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
tpep_dropoff_datetime = LocalDateTime.parse(arr[2], DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
passenger_count = Integer.parseInt(arr[3]);
trip_distance = Double.parseDouble(arr[4]);
RatecodeID = Long.parseLong(arr[5]);
store_and_fwd_flag = arr[6];
PULocationID = Long.parseLong(arr[7]);
DOLocationID = Long.parseLong(arr[8]);
payment_type = arr[9];
fare_amount = Double.parseDouble(arr[10]);
extra = Double.parseDouble(arr[11]);
mta_tax = Double.parseDouble(arr[12]);
tip_amount = Double.parseDouble(arr[13]);
tolls_amount = Double.parseDouble(arr[14]);
improvement_surcharge = Double.parseDouble(arr[15]);
total_amount = Double.parseDouble(arr[16]);
congestion_surcharge = Double.parseDouble(arr[17]);
}
public Ride(){}
public String VendorID;
public LocalDateTime tpep_pickup_datetime;
public LocalDateTime tpep_dropoff_datetime;
public int passenger_count;
public double trip_distance;
public long RatecodeID;
public String store_and_fwd_flag;
public long PULocationID;
public long DOLocationID;
public String payment_type;
public double fare_amount;
public double extra;
public double mta_tax;
public double tip_amount;
public double tolls_amount;
public double improvement_surcharge;
public double total_amount;
public double congestion_surcharge;
}
| 35.56 | 112 | 0.681445 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example.data;
import java.time.LocalDateTime;
public class VendorInfo {
public VendorInfo(String vendorID, long PULocationID, LocalDateTime pickupTime, LocalDateTime lastDropoffTime) {
VendorID = vendorID;
this.PULocationID = PULocationID;
this.pickupTime = pickupTime;
this.lastDropoffTime = lastDropoffTime;
}
public VendorInfo() {
}
public String VendorID;
public long PULocationID;
public LocalDateTime pickupTime;
public LocalDateTime lastDropoffTime;
}
| 23.590909 | 116 | 0.72037 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.*;
import org.example.customserdes.CustomSerdes;
import org.example.data.PickupLocation;
import org.example.data.Ride;
import org.example.data.VendorInfo;
import org.example.helper.DataGeneratorHelper;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.util.Properties;
import static org.junit.jupiter.api.Assertions.*;
class JsonKStreamJoinsTest {
private Properties props = new Properties();
private static TopologyTestDriver testDriver;
private TestInputTopic<String, Ride> ridesTopic;
private TestInputTopic<String, PickupLocation> pickLocationTopic;
private TestOutputTopic<String, VendorInfo> outputTopic;
private Topology topology = new JsonKStreamJoins().createTopology();
@BeforeEach
public void setup() {
props = new Properties();
props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "testing_count_application");
props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
if (testDriver != null) {
testDriver.close();
}
testDriver = new TopologyTestDriver(topology, props);
ridesTopic = testDriver.createInputTopic(Topics.INPUT_RIDE_TOPIC, Serdes.String().serializer(), CustomSerdes.getSerde(Ride.class).serializer());
pickLocationTopic = testDriver.createInputTopic(Topics.INPUT_RIDE_LOCATION_TOPIC, Serdes.String().serializer(), CustomSerdes.getSerde(PickupLocation.class).serializer());
outputTopic = testDriver.createOutputTopic(Topics.OUTPUT_TOPIC, Serdes.String().deserializer(), CustomSerdes.getSerde(VendorInfo.class).deserializer());
}
@Test
public void testIfJoinWorksOnSameDropOffPickupLocationId() {
Ride ride = DataGeneratorHelper.generateRide();
PickupLocation pickupLocation = DataGeneratorHelper.generatePickUpLocation(ride.DOLocationID);
ridesTopic.pipeInput(String.valueOf(ride.DOLocationID), ride);
pickLocationTopic.pipeInput(String.valueOf(pickupLocation.PULocationID), pickupLocation);
assertEquals(outputTopic.getQueueSize(), 1);
var expected = new VendorInfo(ride.VendorID, pickupLocation.PULocationID, pickupLocation.tpep_pickup_datetime, ride.tpep_dropoff_datetime);
var result = outputTopic.readKeyValue();
assertEquals(result.key, String.valueOf(ride.DOLocationID));
assertEquals(result.value.VendorID, expected.VendorID);
assertEquals(result.value.pickupTime, expected.pickupTime);
}
@AfterAll
public static void shutdown() {
testDriver.close();
}
} | 44.460317 | 178 | 0.754803 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.*;
import org.example.customserdes.CustomSerdes;
import org.example.data.Ride;
import org.example.helper.DataGeneratorHelper;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
import java.util.Properties;
class JsonKStreamTest {
private Properties props;
private static TopologyTestDriver testDriver;
private TestInputTopic<String, Ride> inputTopic;
private TestOutputTopic<String, Long> outputTopic;
private Topology topology = new JsonKStream().createTopology();
@BeforeEach
public void setup() {
props = new Properties();
props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "testing_count_application");
props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
if (testDriver != null) {
testDriver.close();
}
testDriver = new TopologyTestDriver(topology, props);
inputTopic = testDriver.createInputTopic("rides", Serdes.String().serializer(), CustomSerdes.getSerde(Ride.class).serializer());
outputTopic = testDriver.createOutputTopic("rides-pulocation-count", Serdes.String().deserializer(), Serdes.Long().deserializer());
}
@Test
public void testIfOneMessageIsPassedToInputTopicWeGetCountOfOne() {
Ride ride = DataGeneratorHelper.generateRide();
inputTopic.pipeInput(String.valueOf(ride.DOLocationID), ride);
assertEquals(outputTopic.readKeyValue(), KeyValue.pair(String.valueOf(ride.DOLocationID), 1L));
assertTrue(outputTopic.isEmpty());
}
@Test
public void testIfTwoMessageArePassedWithDifferentKey() {
Ride ride1 = DataGeneratorHelper.generateRide();
ride1.DOLocationID = 100L;
inputTopic.pipeInput(String.valueOf(ride1.DOLocationID), ride1);
Ride ride2 = DataGeneratorHelper.generateRide();
ride2.DOLocationID = 200L;
inputTopic.pipeInput(String.valueOf(ride2.DOLocationID), ride2);
assertEquals(outputTopic.readKeyValue(), KeyValue.pair(String.valueOf(ride1.DOLocationID), 1L));
assertEquals(outputTopic.readKeyValue(), KeyValue.pair(String.valueOf(ride2.DOLocationID), 1L));
assertTrue(outputTopic.isEmpty());
}
@Test
public void testIfTwoMessageArePassedWithSameKey() {
Ride ride1 = DataGeneratorHelper.generateRide();
ride1.DOLocationID = 100L;
inputTopic.pipeInput(String.valueOf(ride1.DOLocationID), ride1);
Ride ride2 = DataGeneratorHelper.generateRide();
ride2.DOLocationID = 100L;
inputTopic.pipeInput(String.valueOf(ride2.DOLocationID), ride2);
assertEquals(outputTopic.readKeyValue(), KeyValue.pair("100", 1L));
assertEquals(outputTopic.readKeyValue(), KeyValue.pair("100", 2L));
assertTrue(outputTopic.isEmpty());
}
@AfterAll
public static void tearDown() {
testDriver.close();
}
} | 37.7375 | 139 | 0.715623 |
data-engineering-zoomcamp | https://github.com/DataTalksClub/data-engineering-zoomcamp | Free Data Engineering course! | 15,757 | 3,602 | 2023-12-05 01:08:47+00:00 | 2021-10-21 09:32:50+00:00 | 1,561 | null | Java | package org.example.helper;
import org.example.data.PickupLocation;
import org.example.data.Ride;
import org.example.data.VendorInfo;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.List;
public class DataGeneratorHelper {
public static Ride generateRide() {
var arrivalTime = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
var departureTime = LocalDateTime.now().minusMinutes(30).format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
return new Ride(new String[]{"1", departureTime, arrivalTime,"1","1.50","1","N","238","75","2","8","0.5","0.5","0","0","0.3","9.3","0"});
}
public static PickupLocation generatePickUpLocation(long pickupLocationId) {
return new PickupLocation(pickupLocationId, LocalDateTime.now());
}
}
| 38 | 145 | 0.715286 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators import LoadDimensionOperator
from helpers import SqlQueries
def load_dimension_subdag(
parent_dag_name,
task_id,
redshift_conn_id,
sql_statement,
delete_load,
table_name,
*args, **kwargs):
dag = DAG(f"{parent_dag_name}.{task_id}", **kwargs)
load_dimension_table = LoadDimensionOperator(
task_id=task_id,
dag=dag,
redshift_conn_id=redshift_conn_id,
sql_query = sql_statement,
delete_load = delete_load,
table_name = table_name,
)
load_dimension_table
return dag | 23.333333 | 58 | 0.657064 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators import ( CreateTableOperator, StageToRedshiftOperator, LoadFactOperator,
LoadDimensionOperator, DataQualityOperator)
from helpers import SqlQueries
from sparkify_dimension_subdag import load_dimension_subdag
from airflow.operators.subdag_operator import SubDagOperator
#AWS_KEY = os.environ.get('AWS_KEY')
#AWS_SECRET = os.environ.get('AWS_SECRET')
s3_bucket = 'udacity-dend-warehouse'
song_s3_key = "song_data"
log_s3_key = "log-data"
log_json_file = "log_json_path.json"
default_args = {
'owner': 'udacity',
'depends_on_past': True,
'start_date': datetime(2019, 1, 12),
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
'catchup': True
}
dag_name = 'udac_example_dag'
dag = DAG(dag_name,
default_args=default_args,
description='Load and transform data in Redshift with Airflow',
schedule_interval='0 * * * *',
max_active_runs = 1
)
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
create_tables_in_redshift = CreateTableOperator(
task_id = 'create_tables_in_redshift',
redshift_conn_id = 'redshift',
dag = dag
)
stage_events_to_redshift = StageToRedshiftOperator(
task_id='Stage_events',
table_name="staging_events",
s3_bucket = s3_bucket,
s3_key = log_s3_key,
file_format="JSON",
log_json_file = log_json_file,
redshift_conn_id = "redshift",
aws_credential_id="aws_credentials",
dag=dag,
provide_context=True
)
stage_songs_to_redshift = StageToRedshiftOperator(
task_id='Stage_songs',
table_name="staging_songs",
s3_bucket = s3_bucket,
s3_key = song_s3_key,
file_format="JSON",
redshift_conn_id = "redshift",
aws_credential_id="aws_credentials",
dag=dag,
provide_context=True
)
load_songplays_table = LoadFactOperator(
task_id='Load_songplays_fact_table',
redshift_conn_id = 'redshift',
sql_query = SqlQueries.songplay_table_insert,
dag=dag
)
load_user_dimension_table = SubDagOperator(
subdag=load_dimension_subdag(
parent_dag_name=dag_name,
task_id="Load_user_dim_table",
redshift_conn_id="redshift",
start_date=default_args['start_date'],
sql_statement=SqlQueries.user_table_insert,
delete_load = True,
table_name = "users",
),
task_id="Load_user_dim_table",
dag=dag,
)
load_song_dimension_table = SubDagOperator(
subdag=load_dimension_subdag(
parent_dag_name=dag_name,
task_id="Load_song_dim_table",
redshift_conn_id="redshift",
start_date=default_args['start_date'],
sql_statement=SqlQueries.song_table_insert,
delete_load = True,
table_name = "songs",
),
task_id="Load_song_dim_table",
dag=dag,
)
load_artist_dimension_table = SubDagOperator(
subdag=load_dimension_subdag(
parent_dag_name=dag_name,
task_id="Load_artist_dim_table",
redshift_conn_id="redshift",
start_date=default_args['start_date'],
sql_statement=SqlQueries.artist_table_insert,
delete_load = True,
table_name = "artists",
),
task_id="Load_artist_dim_table",
dag=dag,
)
load_time_dimension_table = SubDagOperator(
subdag=load_dimension_subdag(
parent_dag_name=dag_name,
task_id="Load_time_dim_table",
redshift_conn_id="redshift",
start_date=default_args['start_date'],
sql_statement=SqlQueries.time_table_insert,
delete_load = True,
table_name = "time",
),
task_id="Load_time_dim_table",
dag=dag,
)
run_quality_checks = DataQualityOperator(
task_id='Run_data_quality_checks',
dag=dag,
redshift_conn_id = "redshift",
tables = ["artists", "songplays", "songs", "time", "users"]
)
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
start_operator >> create_tables_in_redshift
create_tables_in_redshift >> [stage_songs_to_redshift, stage_events_to_redshift] >> load_songplays_table
load_songplays_table >> [load_user_dimension_table, load_song_dimension_table, load_artist_dimension_table, load_time_dimension_table] >> run_quality_checks >> end_operator
| 27.06962 | 172 | 0.657871 |
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | 0 | 0 | 0 |
|
Udacity-Data-Engineering-Projects | https://github.com/san089/Udacity-Data-Engineering-Projects | Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development. | 1,219 | 433 | 2023-12-04 20:08:27+00:00 | 2020-01-20 22:50:03+00:00 | 2,128 | Other | Python | import configparser
# CONFIG
config = configparser.ConfigParser()
config.read('dwh.cfg')
# DROP TABLES
staging_events_table_drop = "DROP TABLE IF EXISTS staging_events;"
staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs;"
songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
user_table_drop = "DROP TABLE IF EXISTS users;"
song_table_drop = "DROP TABLE IF EXISTS songs;"
artist_table_drop = "DROP TABLE IF EXISTS artists;"
time_table_drop = "DROP TABLE IF EXISTS time;"
# CREATE TABLES
staging_events_table_create= ("""
CREATE TABLE IF NOT EXISTS staging_events
(
artist VARCHAR,
auth VARCHAR,
firstName VARCHAR(50),
gender CHAR,
itemInSession INTEGER,
lastName VARCHAR(50),
length FLOAT,
level VARCHAR,
location VARCHAR,
method VARCHAR,
page VARCHAR,
registration FLOAT,
sessionId INTEGER,
song VARCHAR,
status INTEGER,
ts BIGINT,
userAgent VARCHAR,
userId INTEGER
);
""")
staging_songs_table_create = ("""
CREATE TABLE IF NOT EXISTS staging_songs
(
num_songs INTEGER,
artist_id VARCHAR,
artist_latitude FLOAT,
artist_longitude FLOAT,
artist_location VARCHAR,
artist_name VARCHAR,
song_id VARCHAR,
title VARCHAR,
duration FLOAT,
year FLOAT
);
""")
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays
(
songplay_id INTEGER IDENTITY (1, 1) PRIMARY KEY ,
start_time TIMESTAMP,
user_id INTEGER,
level VARCHAR,
song_id VARCHAR,
artist_id VARCHAR,
session_id INTEGER,
location VARCHAR,
user_agent VARCHAR
)
DISTSTYLE KEY
DISTKEY ( start_time )
SORTKEY ( start_time );
""")
user_table_create = ("""
CREATE TABLE IF NOT EXISTS users
(
userId INTEGER PRIMARY KEY,
    firstname VARCHAR(50),
lastname VARCHAR(50),
gender CHAR(1) ENCODE BYTEDICT,
level VARCHAR ENCODE BYTEDICT
)
SORTKEY (userId);
""")
song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs
(
song_id VARCHAR PRIMARY KEY,
title VARCHAR,
artist_id VARCHAR,
year INTEGER ENCODE BYTEDICT,
duration FLOAT
)
SORTKEY (song_id);
""")
artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artists
(
artist_id VARCHAR PRIMARY KEY ,
name VARCHAR,
location VARCHAR,
latitude FLOAT,
longitude FLOAT
)
SORTKEY (artist_id);
""")
time_table_create = ("""
CREATE TABLE IF NOT EXISTS time
(
start_time TIMESTAMP PRIMARY KEY ,
hour INTEGER,
day INTEGER,
week INTEGER,
month INTEGER,
year INTEGER ENCODE BYTEDICT ,
weekday VARCHAR(9) ENCODE BYTEDICT
)
DISTSTYLE KEY
DISTKEY ( start_time )
SORTKEY (start_time);
""")
# STAGING TABLES
staging_events_copy = ("""
COPY staging_events
FROM {}
iam_role {}
FORMAT AS json {};
""").format(config['S3']['LOG_DATA'], config['IAM_ROLE']['ARN'], config['S3']['LOG_JSONPATH'])
staging_songs_copy = ("""
COPY staging_songs
FROM {}
iam_role {}
FORMAT AS json 'auto';
""").format(config['S3']['SONG_DATA'], config['IAM_ROLE']['ARN'])
# FINAL TABLES
songplay_table_insert = ("""
INSERT INTO songplays (START_TIME, USER_ID, LEVEL, SONG_ID, ARTIST_ID, SESSION_ID, LOCATION, USER_AGENT)
SELECT DISTINCT
TIMESTAMP 'epoch' + (se.ts / 1000) * INTERVAL '1 second' as start_time,
se.userId,
se.level,
ss.song_id,
ss.artist_id,
se.sessionId,
se.location,
se.userAgent
FROM staging_songs ss
INNER JOIN staging_events se
ON (ss.title = se.song AND se.artist = ss.artist_name)
AND se.page = 'NextSong';
""")
user_table_insert = ("""
INSERT INTO users
SELECT DISTINCT userId, firstName, lastName, gender, level
FROM staging_events
WHERE userId IS NOT NULL
AND page = 'NextSong';
""")
song_table_insert = ("""
INSERT INTO songs
SELECT
DISTINCT song_id, title, artist_id, year, duration
FROM staging_songs
WHERE song_id IS NOT NULL;
""")
artist_table_insert = ("""
INSERT INTO artists
SELECT
DISTINCT artist_id, artist_name, artist_location, artist_latitude, artist_longitude
FROM staging_songs;
""")
time_table_insert = ("""
    INSERT INTO time
SELECT DISTINCT
TIMESTAMP 'epoch' + (ts/1000) * INTERVAL '1 second' as start_time,
EXTRACT(HOUR FROM start_time) AS hour,
EXTRACT(DAY FROM start_time) AS day,
EXTRACT(WEEKS FROM start_time) AS week,
EXTRACT(MONTH FROM start_time) AS month,
EXTRACT(YEAR FROM start_time) AS year,
to_char(start_time, 'Day') AS weekday
FROM staging_events;
""")
# QUERY LISTS
create_table_queries = [staging_events_table_create, staging_songs_table_create, songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
copy_table_queries = [staging_events_copy, staging_songs_copy]
insert_table_queries = [songplay_table_insert, user_table_insert, song_table_insert, artist_table_insert, time_table_insert]
| 23.429952 | 181 | 0.68038 |