prompt (string, 45–17.8k chars) | completion (string, 6–107 chars) | api (string, 12–42 chars)
---|---|---
Each row pairs a truncated Python source that uses SQLModel (prompt) with the next SQLModel API call that completes it (completion) and that call's fully qualified name (api). The source snippets behind the rows follow.
"""Initial
Revision ID: d63ccd5484d7
Revises:
Create Date: 2021-11-14 00:28:55.123695
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facilities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('category', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_facilities_category'), 'facilities', ['category'], unique=False)
op.create_index(op.f('ix_facilities_created_at'), 'facilities', ['created_at'], unique=False)
op.create_index(op.f('ix_facilities_id'), 'facilities', ['id'], unique=False)
op.create_index(op.f('ix_facilities_name'), 'facilities', ['name'], unique=False)
op.create_index(op.f('ix_facilities_notes'), 'facilities', ['notes'], unique=False)
op.create_index(op.f('ix_facilities_updated_at'), 'facilities', ['updated_at'], unique=False)
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('description', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_code', | sqlmodel.sql.sqltypes.AutoString() | sqlmodel.sql.sqltypes.AutoString |
"""Initial
Revision ID: d63ccd5484d7
Revises:
Create Date: 2021-11-14 00:28:55.123695
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facilities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('category', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_facilities_category'), 'facilities', ['category'], unique=False)
op.create_index(op.f('ix_facilities_created_at'), 'facilities', ['created_at'], unique=False)
op.create_index(op.f('ix_facilities_id'), 'facilities', ['id'], unique=False)
op.create_index(op.f('ix_facilities_name'), 'facilities', ['name'], unique=False)
op.create_index(op.f('ix_facilities_notes'), 'facilities', ['notes'], unique=False)
op.create_index(op.f('ix_facilities_updated_at'), 'facilities', ['updated_at'], unique=False)
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('description', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_code', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', | sqlmodel.sql.sqltypes.AutoString() | sqlmodel.sql.sqltypes.AutoString |
"""Initial
Revision ID: d63ccd5484d7
Revises:
Create Date: 2021-11-14 00:28:55.123695
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facilities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('category', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_facilities_category'), 'facilities', ['category'], unique=False)
op.create_index(op.f('ix_facilities_created_at'), 'facilities', ['created_at'], unique=False)
op.create_index(op.f('ix_facilities_id'), 'facilities', ['id'], unique=False)
op.create_index(op.f('ix_facilities_name'), 'facilities', ['name'], unique=False)
op.create_index(op.f('ix_facilities_notes'), 'facilities', ['notes'], unique=False)
op.create_index(op.f('ix_facilities_updated_at'), 'facilities', ['updated_at'], unique=False)
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('description', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_code', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('short_postal_code', | sqlmodel.sql.sqltypes.AutoString() | sqlmodel.sql.sqltypes.AutoString |
"""Initial
Revision ID: d63ccd5484d7
Revises:
Create Date: 2021-11-14 00:28:55.123695
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facilities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('category', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_facilities_category'), 'facilities', ['category'], unique=False)
op.create_index(op.f('ix_facilities_created_at'), 'facilities', ['created_at'], unique=False)
op.create_index(op.f('ix_facilities_id'), 'facilities', ['id'], unique=False)
op.create_index(op.f('ix_facilities_name'), 'facilities', ['name'], unique=False)
op.create_index(op.f('ix_facilities_notes'), 'facilities', ['notes'], unique=False)
op.create_index(op.f('ix_facilities_updated_at'), 'facilities', ['updated_at'], unique=False)
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('description', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_code', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('short_postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('property_type', | sqlmodel.sql.sqltypes.AutoString() | sqlmodel.sql.sqltypes.AutoString |
"""Initial
Revision ID: d63ccd5484d7
Revises:
Create Date: 2021-11-14 00:28:55.123695
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facilities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('category', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_facilities_category'), 'facilities', ['category'], unique=False)
op.create_index(op.f('ix_facilities_created_at'), 'facilities', ['created_at'], unique=False)
op.create_index(op.f('ix_facilities_id'), 'facilities', ['id'], unique=False)
op.create_index(op.f('ix_facilities_name'), 'facilities', ['name'], unique=False)
op.create_index(op.f('ix_facilities_notes'), 'facilities', ['notes'], unique=False)
op.create_index(op.f('ix_facilities_updated_at'), 'facilities', ['updated_at'], unique=False)
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('description', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_code', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('short_postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('property_type', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('postal_code', | sqlmodel.sql.sqltypes.AutoString() | sqlmodel.sql.sqltypes.AutoString |
"""Initial
Revision ID: d63ccd5484d7
Revises:
Create Date: 2021-11-14 00:28:55.123695
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facilities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('category', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_facilities_category'), 'facilities', ['category'], unique=False)
op.create_index(op.f('ix_facilities_created_at'), 'facilities', ['created_at'], unique=False)
op.create_index(op.f('ix_facilities_id'), 'facilities', ['id'], unique=False)
op.create_index(op.f('ix_facilities_name'), 'facilities', ['name'], unique=False)
op.create_index(op.f('ix_facilities_notes'), 'facilities', ['notes'], unique=False)
op.create_index(op.f('ix_facilities_updated_at'), 'facilities', ['updated_at'], unique=False)
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('description', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_code', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('short_postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('property_type', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('ber_code', | sqlmodel.sql.sqltypes.AutoString() | sqlmodel.sql.sqltypes.AutoString |
"""Initial
Revision ID: d63ccd5484d7
Revises:
Create Date: 2021-11-14 00:28:55.123695
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facilities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('category', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_facilities_category'), 'facilities', ['category'], unique=False)
op.create_index(op.f('ix_facilities_created_at'), 'facilities', ['created_at'], unique=False)
op.create_index(op.f('ix_facilities_id'), 'facilities', ['id'], unique=False)
op.create_index(op.f('ix_facilities_name'), 'facilities', ['name'], unique=False)
op.create_index(op.f('ix_facilities_notes'), 'facilities', ['notes'], unique=False)
op.create_index(op.f('ix_facilities_updated_at'), 'facilities', ['updated_at'], unique=False)
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('description', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_code', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('short_postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('property_type', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('ber_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('bedrooms', sa.Integer(), nullable=True),
sa.Column('bathrooms', sa.Integer(), nullable=True),
sa.Column('price', sa.Integer(), nullable=True),
sa.Column('rating_auto', sa.Integer(), nullable=True),
sa.Column('rating_user', sa.Integer(), nullable=True),
sa.Column('telegram_sent_at', sa.DateTime(), nullable=True),
sa.Column('images_count', sa.Integer(), nullable=True),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('notes', | sqlmodel.sql.sqltypes.AutoString() | sqlmodel.sql.sqltypes.AutoString |
"""Initial
Revision ID: d63ccd5484d7
Revises:
Create Date: 2021-11-14 00:28:55.123695
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facilities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('category', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_facilities_category'), 'facilities', ['category'], unique=False)
op.create_index(op.f('ix_facilities_created_at'), 'facilities', ['created_at'], unique=False)
op.create_index(op.f('ix_facilities_id'), 'facilities', ['id'], unique=False)
op.create_index(op.f('ix_facilities_name'), 'facilities', ['name'], unique=False)
op.create_index(op.f('ix_facilities_notes'), 'facilities', ['notes'], unique=False)
op.create_index(op.f('ix_facilities_updated_at'), 'facilities', ['updated_at'], unique=False)
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('description', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_code', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('short_postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('property_type', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('ber_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('bedrooms', sa.Integer(), nullable=True),
sa.Column('bathrooms', sa.Integer(), nullable=True),
sa.Column('price', sa.Integer(), nullable=True),
sa.Column('rating_auto', sa.Integer(), nullable=True),
sa.Column('rating_user', sa.Integer(), nullable=True),
sa.Column('telegram_sent_at', sa.DateTime(), nullable=True),
sa.Column('images_count', sa.Integer(), nullable=True),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('publish_date', sa.DateTime(), nullable=True),
sa.Column('last_updated', sa.DateTime(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_listings_address'), 'listings', ['address'], unique=False)
op.create_index(op.f('ix_listings_bathrooms'), 'listings', ['bathrooms'], unique=False)
op.create_index(op.f('ix_listings_bedrooms'), 'listings', ['bedrooms'], unique=False)
op.create_index(op.f('ix_listings_ber_code'), 'listings', ['ber_code'], unique=False)
op.create_index(op.f('ix_listings_created_at'), 'listings', ['created_at'], unique=False)
op.create_index(op.f('ix_listings_description'), 'listings', ['description'], unique=False)
op.create_index(op.f('ix_listings_id'), 'listings', ['id'], unique=False)
op.create_index(op.f('ix_listings_images_count'), 'listings', ['images_count'], unique=False)
op.create_index(op.f('ix_listings_is_active'), 'listings', ['is_active'], unique=False)
op.create_index(op.f('ix_listings_last_updated'), 'listings', ['last_updated'], unique=False)
op.create_index(op.f('ix_listings_latitude'), 'listings', ['latitude'], unique=False)
op.create_index(op.f('ix_listings_longitude'), 'listings', ['longitude'], unique=False)
op.create_index(op.f('ix_listings_notes'), 'listings', ['notes'], unique=False)
op.create_index(op.f('ix_listings_postal_code'), 'listings', ['postal_code'], unique=False)
op.create_index(op.f('ix_listings_price'), 'listings', ['price'], unique=False)
op.create_index(op.f('ix_listings_property_type'), 'listings', ['property_type'], unique=False)
op.create_index(op.f('ix_listings_publish_date'), 'listings', ['publish_date'], unique=False)
op.create_index(op.f('ix_listings_rating_auto'), 'listings', ['rating_auto'], unique=False)
op.create_index(op.f('ix_listings_rating_user'), 'listings', ['rating_user'], unique=False)
op.create_index(op.f('ix_listings_short_postal_code'), 'listings', ['short_postal_code'], unique=False)
op.create_index(op.f('ix_listings_source'), 'listings', ['source'], unique=False)
op.create_index(op.f('ix_listings_source_code'), 'listings', ['source_code'], unique=False)
op.create_index(op.f('ix_listings_source_id'), 'listings', ['source_id'], unique=False)
op.create_index(op.f('ix_listings_telegram_sent_at'), 'listings', ['telegram_sent_at'], unique=False)
op.create_index(op.f('ix_listings_title'), 'listings', ['title'], unique=False)
op.create_index(op.f('ix_listings_updated_at'), 'listings', ['updated_at'], unique=False)
op.create_index(op.f('ix_listings_url'), 'listings', ['url'], unique=False)
op.create_table('song',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', | sqlmodel.sql.sqltypes.AutoString() | sqlmodel.sql.sqltypes.AutoString |
"""Initial
Revision ID: d63ccd5484d7
Revises:
Create Date: 2021-11-14 00:28:55.123695
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facilities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('category', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_facilities_category'), 'facilities', ['category'], unique=False)
op.create_index(op.f('ix_facilities_created_at'), 'facilities', ['created_at'], unique=False)
op.create_index(op.f('ix_facilities_id'), 'facilities', ['id'], unique=False)
op.create_index(op.f('ix_facilities_name'), 'facilities', ['name'], unique=False)
op.create_index(op.f('ix_facilities_notes'), 'facilities', ['notes'], unique=False)
op.create_index(op.f('ix_facilities_updated_at'), 'facilities', ['updated_at'], unique=False)
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('description', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_code', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('short_postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('property_type', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('ber_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('bedrooms', sa.Integer(), nullable=True),
sa.Column('bathrooms', sa.Integer(), nullable=True),
sa.Column('price', sa.Integer(), nullable=True),
sa.Column('rating_auto', sa.Integer(), nullable=True),
sa.Column('rating_user', sa.Integer(), nullable=True),
sa.Column('telegram_sent_at', sa.DateTime(), nullable=True),
sa.Column('images_count', sa.Integer(), nullable=True),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('publish_date', sa.DateTime(), nullable=True),
sa.Column('last_updated', sa.DateTime(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_listings_address'), 'listings', ['address'], unique=False)
op.create_index(op.f('ix_listings_bathrooms'), 'listings', ['bathrooms'], unique=False)
op.create_index(op.f('ix_listings_bedrooms'), 'listings', ['bedrooms'], unique=False)
op.create_index(op.f('ix_listings_ber_code'), 'listings', ['ber_code'], unique=False)
op.create_index(op.f('ix_listings_created_at'), 'listings', ['created_at'], unique=False)
op.create_index(op.f('ix_listings_description'), 'listings', ['description'], unique=False)
op.create_index(op.f('ix_listings_id'), 'listings', ['id'], unique=False)
op.create_index(op.f('ix_listings_images_count'), 'listings', ['images_count'], unique=False)
op.create_index(op.f('ix_listings_is_active'), 'listings', ['is_active'], unique=False)
op.create_index(op.f('ix_listings_last_updated'), 'listings', ['last_updated'], unique=False)
op.create_index(op.f('ix_listings_latitude'), 'listings', ['latitude'], unique=False)
op.create_index(op.f('ix_listings_longitude'), 'listings', ['longitude'], unique=False)
op.create_index(op.f('ix_listings_notes'), 'listings', ['notes'], unique=False)
op.create_index(op.f('ix_listings_postal_code'), 'listings', ['postal_code'], unique=False)
op.create_index(op.f('ix_listings_price'), 'listings', ['price'], unique=False)
op.create_index(op.f('ix_listings_property_type'), 'listings', ['property_type'], unique=False)
op.create_index(op.f('ix_listings_publish_date'), 'listings', ['publish_date'], unique=False)
op.create_index(op.f('ix_listings_rating_auto'), 'listings', ['rating_auto'], unique=False)
op.create_index(op.f('ix_listings_rating_user'), 'listings', ['rating_user'], unique=False)
op.create_index(op.f('ix_listings_short_postal_code'), 'listings', ['short_postal_code'], unique=False)
op.create_index(op.f('ix_listings_source'), 'listings', ['source'], unique=False)
op.create_index(op.f('ix_listings_source_code'), 'listings', ['source_code'], unique=False)
op.create_index(op.f('ix_listings_source_id'), 'listings', ['source_id'], unique=False)
op.create_index(op.f('ix_listings_telegram_sent_at'), 'listings', ['telegram_sent_at'], unique=False)
op.create_index(op.f('ix_listings_title'), 'listings', ['title'], unique=False)
op.create_index(op.f('ix_listings_updated_at'), 'listings', ['updated_at'], unique=False)
op.create_index(op.f('ix_listings_url'), 'listings', ['url'], unique=False)
op.create_table('song',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('artist', | sqlmodel.sql.sqltypes.AutoString() | sqlmodel.sql.sqltypes.AutoString |
"""Initial
Revision ID: d63ccd5484d7
Revises:
Create Date: 2021-11-14 00:28:55.123695
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facilities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('category', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_facilities_category'), 'facilities', ['category'], unique=False)
op.create_index(op.f('ix_facilities_created_at'), 'facilities', ['created_at'], unique=False)
op.create_index(op.f('ix_facilities_id'), 'facilities', ['id'], unique=False)
op.create_index(op.f('ix_facilities_name'), 'facilities', ['name'], unique=False)
op.create_index(op.f('ix_facilities_notes'), 'facilities', ['notes'], unique=False)
op.create_index(op.f('ix_facilities_updated_at'), 'facilities', ['updated_at'], unique=False)
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('description', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_code', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('short_postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('property_type', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('ber_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('bedrooms', sa.Integer(), nullable=True),
sa.Column('bathrooms', sa.Integer(), nullable=True),
sa.Column('price', sa.Integer(), nullable=True),
sa.Column('rating_auto', sa.Integer(), nullable=True),
sa.Column('rating_user', sa.Integer(), nullable=True),
sa.Column('telegram_sent_at', sa.DateTime(), nullable=True),
sa.Column('images_count', sa.Integer(), nullable=True),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('publish_date', sa.DateTime(), nullable=True),
sa.Column('last_updated', sa.DateTime(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_listings_address'), 'listings', ['address'], unique=False)
op.create_index(op.f('ix_listings_bathrooms'), 'listings', ['bathrooms'], unique=False)
op.create_index(op.f('ix_listings_bedrooms'), 'listings', ['bedrooms'], unique=False)
op.create_index(op.f('ix_listings_ber_code'), 'listings', ['ber_code'], unique=False)
op.create_index(op.f('ix_listings_created_at'), 'listings', ['created_at'], unique=False)
op.create_index(op.f('ix_listings_description'), 'listings', ['description'], unique=False)
op.create_index(op.f('ix_listings_id'), 'listings', ['id'], unique=False)
op.create_index(op.f('ix_listings_images_count'), 'listings', ['images_count'], unique=False)
op.create_index(op.f('ix_listings_is_active'), 'listings', ['is_active'], unique=False)
op.create_index(op.f('ix_listings_last_updated'), 'listings', ['last_updated'], unique=False)
op.create_index(op.f('ix_listings_latitude'), 'listings', ['latitude'], unique=False)
op.create_index(op.f('ix_listings_longitude'), 'listings', ['longitude'], unique=False)
op.create_index(op.f('ix_listings_notes'), 'listings', ['notes'], unique=False)
op.create_index(op.f('ix_listings_postal_code'), 'listings', ['postal_code'], unique=False)
op.create_index(op.f('ix_listings_price'), 'listings', ['price'], unique=False)
op.create_index(op.f('ix_listings_property_type'), 'listings', ['property_type'], unique=False)
op.create_index(op.f('ix_listings_publish_date'), 'listings', ['publish_date'], unique=False)
op.create_index(op.f('ix_listings_rating_auto'), 'listings', ['rating_auto'], unique=False)
op.create_index(op.f('ix_listings_rating_user'), 'listings', ['rating_user'], unique=False)
op.create_index(op.f('ix_listings_short_postal_code'), 'listings', ['short_postal_code'], unique=False)
op.create_index(op.f('ix_listings_source'), 'listings', ['source'], unique=False)
op.create_index(op.f('ix_listings_source_code'), 'listings', ['source_code'], unique=False)
op.create_index(op.f('ix_listings_source_id'), 'listings', ['source_id'], unique=False)
op.create_index(op.f('ix_listings_telegram_sent_at'), 'listings', ['telegram_sent_at'], unique=False)
op.create_index(op.f('ix_listings_title'), 'listings', ['title'], unique=False)
op.create_index(op.f('ix_listings_updated_at'), 'listings', ['updated_at'], unique=False)
op.create_index(op.f('ix_listings_url'), 'listings', ['url'], unique=False)
op.create_table('song',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('artist', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('year', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_song_artist'), 'song', ['artist'], unique=False)
op.create_index(op.f('ix_song_created_at'), 'song', ['created_at'], unique=False)
op.create_index(op.f('ix_song_id'), 'song', ['id'], unique=False)
op.create_index(op.f('ix_song_name'), 'song', ['name'], unique=False)
op.create_index(op.f('ix_song_updated_at'), 'song', ['updated_at'], unique=False)
op.create_index(op.f('ix_song_year'), 'song', ['year'], unique=False)
op.create_table('images',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('url', | sqlmodel.sql.sqltypes.AutoString() | sqlmodel.sql.sqltypes.AutoString |
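The revision above contains only upgrade() and breaks off inside the 'images' table. A minimal sketch of the matching downgrade() is shown below; it is not part of the original file (Alembic's autogenerate would normally also emit explicit op.drop_index() calls before each drop):

def downgrade():
    # Drop tables in the reverse order of creation; dropping a table also
    # removes the indexes that were created on it in upgrade().
    op.drop_table('images')
    op.drop_table('song')
    op.drop_table('listings')
    op.drop_table('increment')
    op.drop_table('facilities')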
from sqlite3.dbapi2 import Timestamp, adapt
from typing import Optional
from sqlmodel import Field, SQLModel
from pydantic import validator
from datetime import datetime, date


class Rate(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    user_id: int = Field(foreign_key="user.id")
    client_id: int = Field(foreign_key="client.id")
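For context, a minimal usage sketch of the Rate table as defined so far. The "user.id" and "client.id" foreign keys point at tables that are not shown here, so the stub User and Client models below are assumptions added only to make the snippet self-contained:

from typing import Optional
from sqlmodel import Field, Session, SQLModel, create_engine

class User(SQLModel, table=True):
    # Stub for the real (not shown) table behind foreign_key="user.id".
    id: Optional[int] = Field(default=None, primary_key=True)

class Client(SQLModel, table=True):
    # Stub for the real (not shown) table behind foreign_key="client.id".
    id: Optional[int] = Field(default=None, primary_key=True)

engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)

with Session(engine) as session:
    session.add(User(id=1))
    session.add(Client(id=1))
    session.add(Rate(user_id=1, client_id=1))  # hypothetical ids for illustration
    session.commit()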
import uuid
from typing import List
from typing import Optional

from sqlalchemy.engine import Engine
from sqlmodel import Field
from sqlmodel import Relationship
from sqlmodel import Session
from sqlmodel import SQLModel
from sqlmodel import create_engine

_engine: Optional[Engine] = None


class FileTagAssociation(SQLModel, table=True):
    file_id: Optional[uuid.UUID] = Field(
        default=None, foreign_key="file.id", primary_key=True
    )
    tag_id: Optional[uuid.UUID] = Field(
        default=None, foreign_key="tag.id", primary_key=True
    )


class File(SQLModel, table=True):
    id: Optional[uuid.UUID] = Field(default_factory=uuid.uuid4, primary_key=True)
    name: str
    path: str
    tags: List["Tag"] = Relationship(
        back_populates="files", link_model=FileTagAssociation
    )


class Tag(SQLModel, table=True):
    id: Optional[uuid.UUID] = Field(default_factory=uuid.uuid4, primary_key=True)
    category: Optional[str]
    value: str
    files: List[File] = Relationship(
        back_populates="tags", link_model=FileTagAssociation
    )


def init(url: str) -> None:
    global _engine
    _engine = create_engine(url)
    SQLModel.metadata.create_all(_engine)
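A short usage sketch for the link model above (the file and tag values are illustrative): init() builds the engine and tables, and assigning tags on a File keeps the FileTagAssociation rows in sync through the paired Relationship declarations.

init("sqlite://")
with Session(_engine) as session:
    photos = Tag(category="topic", value="photos")
    report = File(name="report.pdf", path="/data/report.pdf", tags=[photos])
    session.add(report)
    session.commit()
    # The association row was written automatically via the link model.
    assert report.tags[0].value == "photos"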
from typing import Optional

from sqlmodel import Field, Session, SQLModel, create_engine


def test_query(clear_sqlmodel):
    class Hero(SQLModel, table=True):
        id: Optional[int] = Field(default=None, primary_key=True)
        name: str
        secret_name: str
        age: Optional[int] = None

    hero_1 = Hero(name="Deadpond", secret_name="<NAME>")

    engine = create_engine("sqlite://")
    SQLModel.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(hero_1)
        session.commit()
        session.refresh(hero_1)

    with Session(engine) as session:
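        # Plausible continuation of the truncated second session block (not
        # part of the original source): read the row back and compare it to
        # the refreshed in-memory instance.
        fetched = session.query(Hero).first()
        assert fetched is not None
        assert fetched.name == hero_1.name
        assert fetched.secret_name == hero_1.secret_name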
import typing as t

from sqlmodel import SQLModel, Field, Relationship
from datetime import datetime

from .users import DB_User


class DB_AccessToken(SQLModel, table=True):
    __tablename__ = 'access_tokens'

    id: t.Optional[int] = Field(default=None, primary_key=True)
    """The ID of the access token. This is handled by the database."""
    token: str = Field(max_length=40, sa_column_kwargs={'unique': True})
    """The unique access token."""
    user_id: t.Optional[int] = Field(default=None, foreign_key="users.id")
    user: DB_User = Relationship(back_populates="access_tokens")
    """The user that this token belongs to."""
    last_activity_at: datetime
    """When the access token was last active."""
    created_at: datetime
    """When this token was created."""
    type: str = Field(max_length=100)
    """The type of the access token (example: `'session_remember'`)."""
    title: t.Optional[str] = Field(max_length=150)
    """The title of the access token."""
    last_ip_address: t.Optional[str] = Field(max_length=45)
    """The last IP address associated with this access token."""
    last_user_agent: t.Optional[str] = Field(max_length=255)
    """The last browser user agent that used this token."""


class DB_APIKey(SQLModel, table=True):
    __tablename__ = 'api_keys'

    id: t.Optional[int] = Field(default=None, primary_key=True)
    """The ID of the API key. This is handled by the database."""
    key: str = Field(max_length=100)
    """The unique API key."""
    allowedips: t.Optional[str] = Field(max_length=255)
    """The IP addresses that are allowed to use this API key."""
    scopes: t.Optional[str] = Field(max_length=255)
    """The scopes that this API key has access to."""
    user_id: t.Optional[int] = Field(default=None, foreign_key="users.id")
    user: DB_User = Relationship(back_populates="api_keys")
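The two Relationship fields above require matching list attributes on DB_User in the .users module, which is not included here. A sketch of what that counterpart presumably looks like (an assumption, not the actual source):

class DB_User(SQLModel, table=True):
    # Assumed shape of the imported .users model; only the pieces needed by
    # the foreign keys and back_populates targets above are sketched.
    __tablename__ = 'users'

    id: t.Optional[int] = Field(default=None, primary_key=True)
    access_tokens: t.List["DB_AccessToken"] = Relationship(back_populates="user")
    api_keys: t.List["DB_APIKey"] = Relationship(back_populates="user")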
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objects related to the running of Models and Generators."""
from bdb import BdbQuit
from datetime import datetime, timedelta
from time import time
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple
from uuid import UUID
from psycopg import connect as pg3_connect
from pydantic.fields import Field, PrivateAttr
from pydasher import hasher
from sqlalchemy.future import Engine
from sqlmodel import Session, select
from tqdm import tqdm
import dbgen.exceptions as exceptions
from dbgen.core.base import Base, encoders
from dbgen.core.generator import Generator
from dbgen.core.metadata import (
GeneratorEntity,
GeneratorRunEntity,
GensToRun,
ModelEntity,
Repeats,
RunEntity,
Status,
)
from dbgen.core.model import Model
from dbgen.core.node.extract import Extract
from dbgen.core.node.query import BaseQuery
from dbgen.exceptions import DBgenExternalError, DBgenSkipException, RepeatException, SerializationError
from dbgen.utils.log import LogLevel
class RunConfig(Base):
"""Configuration for the running of a Generator and Model"""
retry: bool = False
include: Set[str] = Field(default_factory=set)
exclude: Set[str] = Field(default_factory=set)
start: Optional[str]
until: Optional[str]
batch_size: Optional[int]
progress_bar: bool = True
log_level: LogLevel = LogLevel.INFO
def should_gen_run(self, generator: Generator) -> bool:
"""Check a generator against include/exclude to see if it should run."""
markers = [generator.name, *generator.tags]
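# A generator should run if at least one of its markers passes: the include filter is empty or contains the marker, and the marker is not excluded.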
should_run = any(
map(
lambda x: (not self.include or x in self.include) and x not in self.exclude,
markers,
)
)
return should_run
def get_invalid_markers(self, model: Model) -> Dict[str, List[str]]:
"""Check that all inputs to RunConfig are meaningful for the model."""
invalid_marks = {}
gen_names = model.gens().keys()
# Validate start and until
for attr in ("start", "until"):
val: str = getattr(self, attr)
if val is not None and val not in gen_names:
invalid_marks[attr] = [val]
# Validate include and exclude as sets
for attr in ("include", "exclude"):
set_val: Set[str] = getattr(self, attr)
invalid_vals = [x for x in set_val if not model.validate_marker(x)]
if invalid_vals:
invalid_marks[attr] = invalid_vals
return invalid_marks
def update_run_by_id(run_id, status: Status, session: Session):
run = session.get(RunEntity, run_id)
assert run, f"No run found with id {run_id}"
run.status = status
session.commit()
class RunInitializer(Base):
"""Intializes a run by syncing the database and getting the run_id."""
def execute(self, engine: Engine, run_config: RunConfig) -> int:
# Use some metadatabase connection to initialize the run
# Store the details of the run on the metadatabase so downstream GeneratorRuns can pick them up
# Sync the database with the registries
with Session(engine) as session:
run = RunEntity(status=Status.initialized)
session.add(run)
session.commit()
session.refresh(run)
assert isinstance(run.id, int)
run.status = Status.running
session.commit()
run_id = run.id
return run_id
class BaseGeneratorRun(Base):
"""A lightwieght wrapper for the Generator that grabs a specific Generator from metadatabase and runs it."""
_old_repeats: Set[UUID] = PrivateAttr(default_factory=set)
_new_repeats: Set[UUID] = PrivateAttr(default_factory=set)
def get_gen(self, meta_engine: Engine, *args, **kwargs) -> Generator:
raise NotImplementedError
def execute(
self,
main_engine: Engine,
meta_engine: Engine,
run_id: Optional[int],
run_config: Optional[RunConfig],
ordering: Optional[int],
):
# Set default values for run_config if none provided
if run_config is None:
run_config = RunConfig()
generator = self.get_gen(meta_engine=meta_engine)
# Initialize the generator_row in the meta database
meta_session = | Session(meta_engine) | sqlmodel.Session |
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objects related to the running of Models and Generators."""
from bdb import BdbQuit
from datetime import datetime, timedelta
from time import time
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple
from uuid import UUID
from psycopg import connect as pg3_connect
from pydantic.fields import Field, PrivateAttr
from pydasher import hasher
from sqlalchemy.future import Engine
from sqlmodel import Session, select
from tqdm import tqdm
import dbgen.exceptions as exceptions
from dbgen.core.base import Base, encoders
from dbgen.core.generator import Generator
from dbgen.core.metadata import (
GeneratorEntity,
GeneratorRunEntity,
GensToRun,
ModelEntity,
Repeats,
RunEntity,
Status,
)
from dbgen.core.model import Model
from dbgen.core.node.extract import Extract
from dbgen.core.node.query import BaseQuery
from dbgen.exceptions import DBgenExternalError, DBgenSkipException, RepeatException, SerializationError
from dbgen.utils.log import LogLevel
class RunConfig(Base):
"""Configuration for the running of a Generator and Model"""
retry: bool = False
include: Set[str] = Field(default_factory=set)
exclude: Set[str] = Field(default_factory=set)
start: Optional[str]
until: Optional[str]
batch_size: Optional[int]
progress_bar: bool = True
log_level: LogLevel = LogLevel.INFO
def should_gen_run(self, generator: Generator) -> bool:
"""Check a generator against include/exclude to see if it should run."""
markers = [generator.name, *generator.tags]
should_run = any(
map(
lambda x: (not self.include or x in self.include) and x not in self.exclude,
markers,
)
)
return should_run
def get_invalid_markers(self, model: Model) -> Dict[str, List[str]]:
"""Check that all inputs to RunConfig are meaningful for the model."""
invalid_marks = {}
gen_names = model.gens().keys()
# Validate start and until
for attr in ("start", "until"):
val: str = getattr(self, attr)
if val is not None and val not in gen_names:
invalid_marks[attr] = [val]
# Validate include and exclude as sets
for attr in ("include", "exclude"):
set_val: Set[str] = getattr(self, attr)
invalid_vals = [x for x in set_val if not model.validate_marker(x)]
if invalid_vals:
invalid_marks[attr] = invalid_vals
return invalid_marks
def update_run_by_id(run_id, status: Status, session: Session):
run = session.get(RunEntity, run_id)
assert run, f"No run found with id {run_id}"
run.status = status
session.commit()
class RunInitializer(Base):
"""Intializes a run by syncing the database and getting the run_id."""
def execute(self, engine: Engine, run_config: RunConfig) -> int:
# Use some metadatabase connection to initialize the run
# Store the details of the run on the metadatabase so downstream GeneratorRuns can pick them up
# Sync the database with the registries
with | Session(engine) | sqlmodel.Session |
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objects related to the running of Models and Generators."""
from bdb import BdbQuit
from datetime import datetime, timedelta
from time import time
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple
from uuid import UUID
from psycopg import connect as pg3_connect
from pydantic.fields import Field, PrivateAttr
from pydasher import hasher
from sqlalchemy.future import Engine
from sqlmodel import Session, select
from tqdm import tqdm
import dbgen.exceptions as exceptions
from dbgen.core.base import Base, encoders
from dbgen.core.generator import Generator
from dbgen.core.metadata import (
GeneratorEntity,
GeneratorRunEntity,
GensToRun,
ModelEntity,
Repeats,
RunEntity,
Status,
)
from dbgen.core.model import Model
from dbgen.core.node.extract import Extract
from dbgen.core.node.query import BaseQuery
from dbgen.exceptions import DBgenExternalError, DBgenSkipException, RepeatException, SerializationError
from dbgen.utils.log import LogLevel
class RunConfig(Base):
"""Configuration for the running of a Generator and Model"""
retry: bool = False
include: Set[str] = Field(default_factory=set)
exclude: Set[str] = Field(default_factory=set)
start: Optional[str]
until: Optional[str]
batch_size: Optional[int]
progress_bar: bool = True
log_level: LogLevel = LogLevel.INFO
def should_gen_run(self, generator: Generator) -> bool:
"""Check a generator against include/exclude to see if it should run."""
markers = [generator.name, *generator.tags]
should_run = any(
map(
lambda x: (not self.include or x in self.include) and x not in self.exclude,
markers,
)
)
return should_run
def get_invalid_markers(self, model: Model) -> Dict[str, List[str]]:
"""Check that all inputs to RunConfig are meaningful for the model."""
invalid_marks = {}
gen_names = model.gens().keys()
# Validate start and until
for attr in ("start", "until"):
val: str = getattr(self, attr)
if val is not None and val not in gen_names:
invalid_marks[attr] = [val]
# Validate include and exclude as sets
for attr in ("include", "exclude"):
set_val: Set[str] = getattr(self, attr)
invalid_vals = [x for x in set_val if not model.validate_marker(x)]
if invalid_vals:
invalid_marks[attr] = invalid_vals
return invalid_marks
def update_run_by_id(run_id, status: Status, session: Session):
run = session.get(RunEntity, run_id)
assert run, f"No run found with id {run_id}"
run.status = status
session.commit()
class RunInitializer(Base):
"""Intializes a run by syncing the database and getting the run_id."""
def execute(self, engine: Engine, run_config: RunConfig) -> int:
# Use some metadatabase connection to initialize the run
# Store the details of the run on the metadatabase so downstream GeneratorRuns can pick them up
# Sync the database with the registries
with Session(engine) as session:
run = RunEntity(status=Status.initialized)
session.add(run)
session.commit()
session.refresh(run)
assert isinstance(run.id, int)
run.status = Status.running
session.commit()
run_id = run.id
return run_id
class BaseGeneratorRun(Base):
"""A lightwieght wrapper for the Generator that grabs a specific Generator from metadatabase and runs it."""
_old_repeats: Set[UUID] = PrivateAttr(default_factory=set)
_new_repeats: Set[UUID] = PrivateAttr(default_factory=set)
def get_gen(self, meta_engine: Engine, *args, **kwargs) -> Generator:
raise NotImplementedError
def execute(
self,
main_engine: Engine,
meta_engine: Engine,
run_id: Optional[int],
run_config: Optional[RunConfig],
ordering: Optional[int],
):
# Set default values for run_config if none provided
if run_config is None:
run_config = RunConfig()
generator = self.get_gen(meta_engine=meta_engine)
# Initialize the generator_row in the meta database
meta_session = Session(meta_engine)
gen_run = self._initialize_gen_run(
generator=generator, session=meta_session, run_id=run_id, ordering=ordering
)
# Check if our run config excludes our generator
if not run_config.should_gen_run(generator):
self._logger.info(f'Excluding generator {generator.name!r}')
gen_run.status = Status.excluded
meta_session.commit()
return
# Start the Generator
self._logger.info(f'Running generator {generator.name!r}...')
gen_run.status = Status.running
meta_session.commit()
start = time()
# Set the extractor
self._logger.debug('Initializing extractor')
extractor_connection = main_engine.connect()
extract = generator.extract
if isinstance(extract, BaseQuery):
extract.set_extractor(connection=extractor_connection)
else:
extract.set_extractor()
self._logger.debug('Fetching extractor length')
row_count = extract.length(connection=extractor_connection)
gen_run.inputs_extracted = row_count
meta_session.commit()
self._logger.debug('Fetching repeats')
# Query the repeats table for input_hashes that match this generator's hash
self._old_repeats = set(
meta_session.exec(select(Repeats.input_hash).where(Repeats.generator_id == generator.uuid)).all()
)
# The batch_size is set either on the run_config or the generator
batch_size = run_config.batch_size or generator.batch_size
assert batch_size is None or batch_size > 0, f"Invalid batch size; batch_size must be > 0: {batch_size}"
# Open raw connections for fast loading
main_raw_connection = pg3_connect(str(main_engine.url))
meta_raw_connection = meta_engine.raw_connection()
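# batch_done flags when a full batch of inputs has been processed and the accumulated rows should be flushed; it is always False when no batch_size is set.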
batch_done = lambda x: x % batch_size == 0 if batch_size is not None else False
# Start while loop to iterate through the nodes
self._logger.info('Looping through extracted rows...')
progress_bar = tqdm(
total=row_count,
position=1,
leave=False,
desc="Transforming...",
disable=not run_config.progress_bar,
)
try:
while True:
gen_run.inputs_processed += 1
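# row maps each executed node's hash to that node's output so downstream nodes can consume it.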
row: Dict[str, Mapping[str, Any]] = {}
try:
for node in generator._sort_graph():
output = node.run(row)
# Extract outputs need to be fed to our repeat checker and need to be checked for stop iterations
if isinstance(node, Extract):
if output is None or batch_done(gen_run.inputs_processed):
self._logger.debug('loading batch...')
self._load_repeats(meta_raw_connection, generator)
rows_inserted, rows_updated = self._load(main_raw_connection, generator)
gen_run.rows_inserted += rows_inserted
gen_run.rows_updated += rows_updated
meta_session.commit()
self._logger.debug('done loading batch.')
self._logger.debug(f'inserted {rows_inserted} rows.')
self._logger.debug(f'updated {rows_updated} rows.')
# if we are out of rows break out of while loop
if output is None:
raise StopIteration
is_repeat, input_hash = self._check_repeat(output, generator.uuid)
if not run_config.retry and is_repeat:
raise RepeatException()
row[node.hash] = output # type: ignore
if not is_repeat:
self._new_repeats.add(input_hash)
gen_run.unique_inputs += 1
progress_bar.update()
# Stop iteration is used to catch the empty extractor
except StopIteration:
break
# A repeated input from the extract will also cause a row to be skipped
except RepeatException:
continue
# Any node can raise a skip exception to skip the input before loading
except DBgenSkipException as exc:
self._logger.debug(f"Skipped Row: {exc.msg}")
gen_run.inputs_skipped += 1
# External errors are raised whenever a node fails due to internal logic
except DBgenExternalError as e:
msg = f"\n\nError when running generator {generator.name}\n"
self._logger.error(msg)
self._logger.error(f"\n{e}")
gen_run.status = Status.failed
gen_run.error = str(e)
run = meta_session.get(RunEntity, run_id)
assert run
run.errors = run.errors + 1 if run.errors else 1
meta_session.commit()
meta_session.close()
return 2
except (
Exception,
KeyboardInterrupt,
SystemExit,
BdbQuit,
) as e:
gen_run.status = Status.failed
gen_run.error = (
f"Uncaught Error encountered during running of generator {generator.name}: {e!r}"
)
update_run_by_id(run_id, Status.failed, meta_session)
raise
# Close all connections
finally:
gen_run.runtime = round(time() - start, 3)
meta_session.commit()
main_raw_connection.close()
meta_raw_connection.close()
extractor_connection.close()
gen_run.status = Status.completed
gen_run.runtime = round(time() - start, 3)
self._logger.info(
f"Finished running generator {generator.name}({generator.uuid}) in {gen_run.runtime}(s)."
)
self._logger.info(f"Loaded approximately {gen_run.rows_inserted} rows")
meta_session.commit()
meta_session.close()
return 0
def _initialize_gen_run(
self,
session: Session,
generator: Generator,
run_id: Optional[int],
ordering: Optional[int],
) -> GeneratorRunEntity:
# if no run_id is provided create one and mark it as a testing run
if run_id is None:
run = RunEntity(status='testing')
session.add(run)
session.commit()
session.refresh(run)
ordering = 0
run_id = run.id
gen_row = generator._get_gen_row()
session.merge(gen_row)
session.commit()
query = generator.extract.query if isinstance(generator.extract, BaseQuery) else ''
gen_run = GeneratorRunEntity(
run_id=run_id,
generator_id=gen_row.id,
status=Status.initialized,
ordering=ordering,
query=query,
)
session.add(gen_run)
session.commit()
session.refresh(gen_run)
return gen_run
def _load(self, connection, generator: Generator) -> Tuple[int, int]:
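# Flush every Load node's accumulated output to the main database, tallying how many rows were inserted vs updated.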
rows_inserted = 0
rows_updated = 0
for load in generator._sorted_loads():
if load.insert:
rows_inserted += len(load._output)
else:
rows_updated += len(load._output)
load.load(connection, gen_id=self.uuid)
return (rows_inserted, rows_updated)
def _load_repeats(self, connection, generator: Generator) -> None:
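# Bulk-load the newly seen input hashes into the Repeats table, then fold them into the in-memory set of known repeats.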
rows = ((generator.uuid, input_hash) for input_hash in self._new_repeats)
Repeats._quick_load(connection, rows, column_names=["generator_id", "input_hash"])
self._old_repeats = self._old_repeats.union(self._new_repeats)
self._new_repeats = set()
def _check_repeat(self, extracted_dict: Dict[str, Any], generator_uuid: UUID) -> Tuple[bool, UUID]:
# Convert Row to a dictionary so we can hash it for repeat-checking
input_hash = UUID(hasher((generator_uuid, extracted_dict), encoders=encoders))
# If the input_hash has been seen and we don't have retry=True skip row
is_repeat = input_hash in self._old_repeats or input_hash in self._new_repeats
return (is_repeat, input_hash)
class GeneratorRun(BaseGeneratorRun):
generator: Generator
def get_gen(self, meta_engine: Engine, *args, **kwargs):
return self.generator
class RemoteGeneratorRun(BaseGeneratorRun):
generator_id: UUID
def get_gen(self, meta_engine, *args, **kwargs):
with | Session(meta_engine) | sqlmodel.Session |
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objects related to the running of Models and Generators."""
from bdb import BdbQuit
from datetime import datetime, timedelta
from time import time
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple
from uuid import UUID
from psycopg import connect as pg3_connect
from pydantic.fields import Field, PrivateAttr
from pydasher import hasher
from sqlalchemy.future import Engine
from sqlmodel import Session, select
from tqdm import tqdm
import dbgen.exceptions as exceptions
from dbgen.core.base import Base, encoders
from dbgen.core.generator import Generator
from dbgen.core.metadata import (
GeneratorEntity,
GeneratorRunEntity,
GensToRun,
ModelEntity,
Repeats,
RunEntity,
Status,
)
from dbgen.core.model import Model
from dbgen.core.node.extract import Extract
from dbgen.core.node.query import BaseQuery
from dbgen.exceptions import DBgenExternalError, DBgenSkipException, RepeatException, SerializationError
from dbgen.utils.log import LogLevel
class RunConfig(Base):
"""Configuration for the running of a Generator and Model"""
retry: bool = False
include: Set[str] = Field(default_factory=set)
exclude: Set[str] = Field(default_factory=set)
start: Optional[str]
until: Optional[str]
batch_size: Optional[int]
progress_bar: bool = True
log_level: LogLevel = LogLevel.INFO
def should_gen_run(self, generator: Generator) -> bool:
"""Check a generator against include/exclude to see if it should run."""
markers = [generator.name, *generator.tags]
should_run = any(
map(
lambda x: (not self.include or x in self.include) and x not in self.exclude,
markers,
)
)
return should_run
def get_invalid_markers(self, model: Model) -> Dict[str, List[str]]:
"""Check that all inputs to RunConfig are meaningful for the model."""
invalid_marks = {}
gen_names = model.gens().keys()
# Validate start and until
for attr in ("start", "until"):
val: str = getattr(self, attr)
if val is not None and val not in gen_names:
invalid_marks[attr] = [val]
# Validate include and exclude as sets
for attr in ("include", "exclude"):
set_val: Set[str] = getattr(self, attr)
invalid_vals = [x for x in set_val if not model.validate_marker(x)]
if invalid_vals:
invalid_marks[attr] = invalid_vals
return invalid_marks
def update_run_by_id(run_id, status: Status, session: Session):
run = session.get(RunEntity, run_id)
assert run, f"No run found with id {run_id}"
run.status = status
session.commit()
class RunInitializer(Base):
"""Intializes a run by syncing the database and getting the run_id."""
def execute(self, engine: Engine, run_config: RunConfig) -> int:
# Use some metadatabase connection to initialize the run
# Store the details of the run on the metadatabase so downstream GeneratorRuns can pick them up
# Sync the database with the registries
with Session(engine) as session:
run = RunEntity(status=Status.initialized)
session.add(run)
session.commit()
session.refresh(run)
assert isinstance(run.id, int)
run.status = Status.running
session.commit()
run_id = run.id
return run_id
class BaseGeneratorRun(Base):
"""A lightwieght wrapper for the Generator that grabs a specific Generator from metadatabase and runs it."""
_old_repeats: Set[UUID] = PrivateAttr(default_factory=set)
_new_repeats: Set[UUID] = PrivateAttr(default_factory=set)
def get_gen(self, meta_engine: Engine, *args, **kwargs) -> Generator:
raise NotImplementedError
def execute(
self,
main_engine: Engine,
meta_engine: Engine,
run_id: Optional[int],
run_config: Optional[RunConfig],
ordering: Optional[int],
):
# Set default values for run_config if none provided
if run_config is None:
run_config = RunConfig()
generator = self.get_gen(meta_engine=meta_engine)
# Initialize the generator_row in the meta database
meta_session = Session(meta_engine)
gen_run = self._initialize_gen_run(
generator=generator, session=meta_session, run_id=run_id, ordering=ordering
)
# Check if our run config excludes our generator
if not run_config.should_gen_run(generator):
self._logger.info(f'Excluding generator {generator.name!r}')
gen_run.status = Status.excluded
meta_session.commit()
return
# Start the Generator
self._logger.info(f'Running generator {generator.name!r}...')
gen_run.status = Status.running
meta_session.commit()
start = time()
# Set the extractor
self._logger.debug('Initializing extractor')
extractor_connection = main_engine.connect()
extract = generator.extract
if isinstance(extract, BaseQuery):
extract.set_extractor(connection=extractor_connection)
else:
extract.set_extractor()
self._logger.debug('Fetching extractor length')
row_count = extract.length(connection=extractor_connection)
gen_run.inputs_extracted = row_count
meta_session.commit()
self._logger.debug('Fetching repeats')
# Query the repeats table for input_hashes that match this generator's hash
self._old_repeats = set(
meta_session.exec(select(Repeats.input_hash).where(Repeats.generator_id == generator.uuid)).all()
)
# The batch_size is set either on the run_config or the generator
batch_size = run_config.batch_size or generator.batch_size
assert batch_size is None or batch_size > 0, f"Invalid batch size; batch_size must be > 0: {batch_size}"
# Open raw connections for fast loading
main_raw_connection = pg3_connect(str(main_engine.url))
meta_raw_connection = meta_engine.raw_connection()
batch_done = lambda x: x % batch_size == 0 if batch_size is not None else False
# Start while loop to iterate through the nodes
self._logger.info('Looping through extracted rows...')
progress_bar = tqdm(
total=row_count,
position=1,
leave=False,
desc="Transforming...",
disable=not run_config.progress_bar,
)
try:
while True:
gen_run.inputs_processed += 1
row: Dict[str, Mapping[str, Any]] = {}
try:
for node in generator._sort_graph():
output = node.run(row)
# Extract outputs need to be fed to our repeat checker and need to be checked for stop iterations
if isinstance(node, Extract):
if output is None or batch_done(gen_run.inputs_processed):
self._logger.debug('loading batch...')
self._load_repeats(meta_raw_connection, generator)
rows_inserted, rows_updated = self._load(main_raw_connection, generator)
gen_run.rows_inserted += rows_inserted
gen_run.rows_updated += rows_updated
meta_session.commit()
self._logger.debug('done loading batch.')
self._logger.debug(f'inserted {rows_inserted} rows.')
self._logger.debug(f'updated {rows_updated} rows.')
# if we are out of rows break out of while loop
if output is None:
raise StopIteration
is_repeat, input_hash = self._check_repeat(output, generator.uuid)
if not run_config.retry and is_repeat:
raise RepeatException()
row[node.hash] = output # type: ignore
if not is_repeat:
self._new_repeats.add(input_hash)
gen_run.unique_inputs += 1
progress_bar.update()
# Stop iteration is used to catch the empty extractor
except StopIteration:
break
# A repeated input from the extract will also cause a row to be skipped
except RepeatException:
continue
# Any node can raise a skip exception to skip the input before loading
except DBgenSkipException as exc:
self._logger.debug(f"Skipped Row: {exc.msg}")
gen_run.inputs_skipped += 1
# External errors are raised whenever a node fails due to internal logic
except DBgenExternalError as e:
msg = f"\n\nError when running generator {generator.name}\n"
self._logger.error(msg)
self._logger.error(f"\n{e}")
gen_run.status = Status.failed
gen_run.error = str(e)
run = meta_session.get(RunEntity, run_id)
assert run
run.errors = run.errors + 1 if run.errors else 1
meta_session.commit()
meta_session.close()
return 2
except (
Exception,
KeyboardInterrupt,
SystemExit,
BdbQuit,
) as e:
gen_run.status = Status.failed
gen_run.error = (
f"Uncaught Error encountered during running of generator {generator.name}: {e!r}"
)
update_run_by_id(run_id, Status.failed, meta_session)
raise
# Close all connections
finally:
gen_run.runtime = round(time() - start, 3)
meta_session.commit()
main_raw_connection.close()
meta_raw_connection.close()
extractor_connection.close()
gen_run.status = Status.completed
gen_run.runtime = round(time() - start, 3)
self._logger.info(
f"Finished running generator {generator.name}({generator.uuid}) in {gen_run.runtime}(s)."
)
self._logger.info(f"Loaded approximately {gen_run.rows_inserted} rows")
meta_session.commit()
meta_session.close()
return 0
def _initialize_gen_run(
self,
session: Session,
generator: Generator,
run_id: Optional[int],
ordering: Optional[int],
) -> GeneratorRunEntity:
# if no run_id is provided create one and mark it as a testing run
if run_id is None:
run = RunEntity(status='testing')
session.add(run)
session.commit()
session.refresh(run)
ordering = 0
run_id = run.id
gen_row = generator._get_gen_row()
session.merge(gen_row)
session.commit()
query = generator.extract.query if isinstance(generator.extract, BaseQuery) else ''
gen_run = GeneratorRunEntity(
run_id=run_id,
generator_id=gen_row.id,
status=Status.initialized,
ordering=ordering,
query=query,
)
session.add(gen_run)
session.commit()
session.refresh(gen_run)
return gen_run
def _load(self, connection, generator: Generator) -> Tuple[int, int]:
rows_inserted = 0
rows_updated = 0
for load in generator._sorted_loads():
if load.insert:
rows_inserted += len(load._output)
else:
rows_updated += len(load._output)
load.load(connection, gen_id=self.uuid)
return (rows_inserted, rows_updated)
def _load_repeats(self, connection, generator: Generator) -> None:
rows = ((generator.uuid, input_hash) for input_hash in self._new_repeats)
Repeats._quick_load(connection, rows, column_names=["generator_id", "input_hash"])
self._old_repeats = self._old_repeats.union(self._new_repeats)
self._new_repeats = set()
def _check_repeat(self, extracted_dict: Dict[str, Any], generator_uuid: UUID) -> Tuple[bool, UUID]:
# Convert Row to a dictionary so we can hash it for repeat-checking
input_hash = UUID(hasher((generator_uuid, extracted_dict), encoders=encoders))
# If the input_hash has been seen and we don't have retry=True skip row
is_repeat = input_hash in self._old_repeats or input_hash in self._new_repeats
return (is_repeat, input_hash)
class GeneratorRun(BaseGeneratorRun):
generator: Generator
def get_gen(self, meta_engine: Engine, *args, **kwargs):
return self.generator
class RemoteGeneratorRun(BaseGeneratorRun):
generator_id: UUID
def get_gen(self, meta_engine, *args, **kwargs):
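# Fetch the serialized generator JSON by id from the metadatabase and rebuild the Generator, verifying that its UUID still matches.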
with Session(meta_engine) as sess:
gen_json = sess.exec(
select(GeneratorEntity.gen_json).where(GeneratorEntity.id == self.generator_id)
).one()
try:
generator = Generator.deserialize(gen_json)
except ModuleNotFoundError as exc:
import os
raise SerializationError(
f"While deserializing generator id {self.generator_id} an unknown module was encountered. Are you using custom dbgen objects reachable by your python environment? Make sure any custom extractors or code can be found in your PYTHONPATH environment variable\nError: {exc}\nPYTHONPATH={os.environ.get('PYTHONPATH')}"
) from exc
if generator.uuid != self.generator_id:
error = f"Deserialization Failed the generator hash has changed for generator named {generator.name}!\n{generator}\n{self.generator_id}"
raise exceptions.SerializationError(error)
return generator
class ModelRun(Base):
model: Model
def get_gen_run(self, generator: Generator) -> BaseGeneratorRun:
return GeneratorRun(generator=generator)
def execute(
self,
main_engine: Engine,
meta_engine: Engine,
run_config: Optional[RunConfig] = None,
nuke: bool = False,
rerun_failed: bool = False,
) -> RunEntity:
start = time()
if run_config is None:
run_config = RunConfig()
# Sync the database state with the model state
self.model.sync(main_engine, meta_engine, nuke=nuke)
# If rerunning failed generators, query for the gens to run and add them to include
if rerun_failed:
with meta_engine.connect() as conn:
result = conn.execute(select(GensToRun.__table__.c.name))
for (gen_name,) in result:
run_config.include.add(gen_name)
# Initialize the run
run_init = RunInitializer()
run_id = run_init.execute(meta_engine, run_config)
sorted_generators = self.model._sort_graph()
# Add generators to metadb
with | Session(meta_engine) | sqlmodel.Session |
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objects related to the running of Models and Generators."""
from bdb import BdbQuit
from datetime import datetime, timedelta
from time import time
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple
from uuid import UUID
from psycopg import connect as pg3_connect
from pydantic.fields import Field, PrivateAttr
from pydasher import hasher
from sqlalchemy.future import Engine
from sqlmodel import Session, select
from tqdm import tqdm
import dbgen.exceptions as exceptions
from dbgen.core.base import Base, encoders
from dbgen.core.generator import Generator
from dbgen.core.metadata import (
GeneratorEntity,
GeneratorRunEntity,
GensToRun,
ModelEntity,
Repeats,
RunEntity,
Status,
)
from dbgen.core.model import Model
from dbgen.core.node.extract import Extract
from dbgen.core.node.query import BaseQuery
from dbgen.exceptions import DBgenExternalError, DBgenSkipException, RepeatException, SerializationError
from dbgen.utils.log import LogLevel
class RunConfig(Base):
"""Configuration for the running of a Generator and Model"""
retry: bool = False
include: Set[str] = Field(default_factory=set)
exclude: Set[str] = Field(default_factory=set)
start: Optional[str]
until: Optional[str]
batch_size: Optional[int]
progress_bar: bool = True
log_level: LogLevel = LogLevel.INFO
def should_gen_run(self, generator: Generator) -> bool:
"""Check a generator against include/exclude to see if it should run."""
markers = [generator.name, *generator.tags]
should_run = any(
map(
lambda x: (not self.include or x in self.include) and x not in self.exclude,
markers,
)
)
return should_run
def get_invalid_markers(self, model: Model) -> Dict[str, List[str]]:
"""Check that all inputs to RunConfig are meaningful for the model."""
invalid_marks = {}
gen_names = model.gens().keys()
# Validate start and until
for attr in ("start", "until"):
val: str = getattr(self, attr)
if val is not None and val not in gen_names:
invalid_marks[attr] = [val]
# Validate include and exclude as sets
for attr in ("include", "exclude"):
set_val: Set[str] = getattr(self, attr)
invalid_vals = [x for x in set_val if not model.validate_marker(x)]
if invalid_vals:
invalid_marks[attr] = invalid_vals
return invalid_marks
def update_run_by_id(run_id, status: Status, session: Session):
run = session.get(RunEntity, run_id)
assert run, f"No run found with id {run_id}"
run.status = status
session.commit()
class RunInitializer(Base):
"""Intializes a run by syncing the database and getting the run_id."""
def execute(self, engine: Engine, run_config: RunConfig) -> int:
# Use some metadatabase connection to initialize the run
# Store the details of the run on the metadatabase so downstream GeneratorRuns can pick them up
# Sync the database with the registries
with Session(engine) as session:
run = RunEntity(status=Status.initialized)
session.add(run)
session.commit()
session.refresh(run)
assert isinstance(run.id, int)
run.status = Status.running
session.commit()
run_id = run.id
return run_id
class BaseGeneratorRun(Base):
"""A lightwieght wrapper for the Generator that grabs a specific Generator from metadatabase and runs it."""
_old_repeats: Set[UUID] = PrivateAttr(default_factory=set)
_new_repeats: Set[UUID] = PrivateAttr(default_factory=set)
def get_gen(self, meta_engine: Engine, *args, **kwargs) -> Generator:
raise NotImplementedError
def execute(
self,
main_engine: Engine,
meta_engine: Engine,
run_id: Optional[int],
run_config: Optional[RunConfig],
ordering: Optional[int],
):
# Set default values for run_config if none provided
if run_config is None:
run_config = RunConfig()
generator = self.get_gen(meta_engine=meta_engine)
# Initialize the generator_row in the meta database
meta_session = Session(meta_engine)
gen_run = self._initialize_gen_run(
generator=generator, session=meta_session, run_id=run_id, ordering=ordering
)
# Check if our run config excludes our generator
if not run_config.should_gen_run(generator):
self._logger.info(f'Excluding generator {generator.name!r}')
gen_run.status = Status.excluded
meta_session.commit()
return
# Start the Generator
self._logger.info(f'Running generator {generator.name!r}...')
gen_run.status = Status.running
meta_session.commit()
start = time()
# Set the extractor
self._logger.debug('Initializing extractor')
extractor_connection = main_engine.connect()
extract = generator.extract
if isinstance(extract, BaseQuery):
extract.set_extractor(connection=extractor_connection)
else:
extract.set_extractor()
self._logger.debug('Fetching extractor length')
row_count = extract.length(connection=extractor_connection)
gen_run.inputs_extracted = row_count
meta_session.commit()
self._logger.debug('Fetching repeats')
# Query the repeats table for input_hashes that match this generator's hash
self._old_repeats = set(
meta_session.exec(select(Repeats.input_hash).where(Repeats.generator_id == generator.uuid)).all()
)
# The batch_size is set either on the run_config or the generator
batch_size = run_config.batch_size or generator.batch_size
assert batch_size is None or batch_size > 0, f"Invalid batch size; batch_size must be > 0: {batch_size}"
# Open raw connections for fast loading
main_raw_connection = pg3_connect(str(main_engine.url))
meta_raw_connection = meta_engine.raw_connection()
batch_done = lambda x: x % batch_size == 0 if batch_size is not None else False
# Start while loop to iterate through the nodes
self._logger.info('Looping through extracted rows...')
progress_bar = tqdm(
total=row_count,
position=1,
leave=False,
desc="Transforming...",
disable=not run_config.progress_bar,
)
try:
while True:
gen_run.inputs_processed += 1
row: Dict[str, Mapping[str, Any]] = {}
try:
for node in generator._sort_graph():
output = node.run(row)
# Extract outputs need to be fed to our repeat checker and need to be checked for stop iterations
if isinstance(node, Extract):
if output is None or batch_done(gen_run.inputs_processed):
self._logger.debug('loading batch...')
self._load_repeats(meta_raw_connection, generator)
rows_inserted, rows_updated = self._load(main_raw_connection, generator)
gen_run.rows_inserted += rows_inserted
gen_run.rows_updated += rows_updated
meta_session.commit()
self._logger.debug('done loading batch.')
self._logger.debug(f'inserted {rows_inserted} rows.')
self._logger.debug(f'updated {rows_updated} rows.')
# if we are out of rows break out of while loop
if output is None:
raise StopIteration
is_repeat, input_hash = self._check_repeat(output, generator.uuid)
if not run_config.retry and is_repeat:
raise RepeatException()
row[node.hash] = output # type: ignore
if not is_repeat:
self._new_repeats.add(input_hash)
gen_run.unique_inputs += 1
progress_bar.update()
# Stop iteration is used to catch the empty extractor
except StopIteration:
break
# A repeated input from the extract will also cause a row to be skipped
except RepeatException:
continue
# Any node can raise a skip exception to skip the input before loading
except DBgenSkipException as exc:
self._logger.debug(f"Skipped Row: {exc.msg}")
gen_run.inputs_skipped += 1
# External errors are raised whenever a node fails due to internal logic
except DBgenExternalError as e:
msg = f"\n\nError when running generator {generator.name}\n"
self._logger.error(msg)
self._logger.error(f"\n{e}")
gen_run.status = Status.failed
gen_run.error = str(e)
run = meta_session.get(RunEntity, run_id)
assert run
run.errors = run.errors + 1 if run.errors else 1
meta_session.commit()
meta_session.close()
return 2
except (
Exception,
KeyboardInterrupt,
SystemExit,
BdbQuit,
) as e:
gen_run.status = Status.failed
gen_run.error = (
f"Uncaught Error encountered during running of generator {generator.name}: {e!r}"
)
update_run_by_id(run_id, Status.failed, meta_session)
raise
# Close all connections
finally:
gen_run.runtime = round(time() - start, 3)
meta_session.commit()
main_raw_connection.close()
meta_raw_connection.close()
extractor_connection.close()
gen_run.status = Status.completed
gen_run.runtime = round(time() - start, 3)
self._logger.info(
f"Finished running generator {generator.name}({generator.uuid}) in {gen_run.runtime}(s)."
)
self._logger.info(f"Loaded approximately {gen_run.rows_inserted} rows")
meta_session.commit()
meta_session.close()
return 0
def _initialize_gen_run(
self,
session: Session,
generator: Generator,
run_id: Optional[int],
ordering: Optional[int],
) -> GeneratorRunEntity:
# if no run_id is provided create one and mark it as a testing run
if run_id is None:
run = RunEntity(status='testing')
session.add(run)
session.commit()
session.refresh(run)
ordering = 0
run_id = run.id
gen_row = generator._get_gen_row()
session.merge(gen_row)
session.commit()
query = generator.extract.query if isinstance(generator.extract, BaseQuery) else ''
gen_run = GeneratorRunEntity(
run_id=run_id,
generator_id=gen_row.id,
status=Status.initialized,
ordering=ordering,
query=query,
)
session.add(gen_run)
session.commit()
session.refresh(gen_run)
return gen_run
def _load(self, connection, generator: Generator) -> Tuple[int, int]:
rows_inserted = 0
rows_updated = 0
for load in generator._sorted_loads():
if load.insert:
rows_inserted += len(load._output)
else:
rows_updated += len(load._output)
load.load(connection, gen_id=self.uuid)
return (rows_inserted, rows_updated)
def _load_repeats(self, connection, generator: Generator) -> None:
rows = ((generator.uuid, input_hash) for input_hash in self._new_repeats)
Repeats._quick_load(connection, rows, column_names=["generator_id", "input_hash"])
self._old_repeats = self._old_repeats.union(self._new_repeats)
self._new_repeats = set()
def _check_repeat(self, extracted_dict: Dict[str, Any], generator_uuid: UUID) -> Tuple[bool, UUID]:
# Convert Row to a dictionary so we can hash it for repeat-checking
input_hash = UUID(hasher((generator_uuid, extracted_dict), encoders=encoders))
# If the input_hash has been seen and we don't have retry=True skip row
is_repeat = input_hash in self._old_repeats or input_hash in self._new_repeats
return (is_repeat, input_hash)
class GeneratorRun(BaseGeneratorRun):
generator: Generator
def get_gen(self, meta_engine: Engine, *args, **kwargs):
return self.generator
class RemoteGeneratorRun(BaseGeneratorRun):
generator_id: UUID
def get_gen(self, meta_engine, *args, **kwargs):
with Session(meta_engine) as sess:
gen_json = sess.exec(
select(GeneratorEntity.gen_json).where(GeneratorEntity.id == self.generator_id)
).one()
try:
generator = Generator.deserialize(gen_json)
except ModuleNotFoundError as exc:
import os
raise SerializationError(
f"While deserializing generator id {self.generator_id} an unknown module was encountered. Are you using custom dbgen objects reachable by your python environment? Make sure any custom extractors or code can be found in your PYTHONPATH environment variable\nError: {exc}\nPYTHONPATH={os.environ.get('PYTHONPATH')}"
) from exc
if generator.uuid != self.generator_id:
error = f"Deserialization Failed the generator hash has changed for generator named {generator.name}!\n{generator}\n{self.generator_id}"
raise exceptions.SerializationError(error)
return generator
class ModelRun(Base):
model: Model
def get_gen_run(self, generator: Generator) -> BaseGeneratorRun:
return GeneratorRun(generator=generator)
def execute(
self,
main_engine: Engine,
meta_engine: Engine,
run_config: Optional[RunConfig] = None,
nuke: bool = False,
rerun_failed: bool = False,
) -> RunEntity:
start = time()
if run_config is None:
run_config = RunConfig()
# Sync the database state with the model state
self.model.sync(main_engine, meta_engine, nuke=nuke)
# If rerunning failed generators, query for the gens to run and add them to include
if rerun_failed:
with meta_engine.connect() as conn:
result = conn.execute(select(GensToRun.__table__.c.name))
for (gen_name,) in result:
run_config.include.add(gen_name)
# Initialize the run
run_init = RunInitializer()
run_id = run_init.execute(meta_engine, run_config)
sorted_generators = self.model._sort_graph()
# Add generators to metadb
with Session(meta_engine) as meta_session:
model_row = self.model._get_model_row()
model_row.last_run = datetime.now()
existing_model = meta_session.get(ModelEntity, model_row.id)
if not existing_model:
meta_session.merge(model_row)
else:
existing_model.last_run = datetime.now()
meta_session.commit()
# Apply start and until to exclude generators not between start_idx and until_idx
if run_config.start or run_config.until:
gen_names = [gen.name for gen in sorted_generators]
start_idx = gen_names.index(run_config.start) if run_config.start else 0
until_idx = gen_names.index(run_config.until) + 1 if run_config.until else len(gen_names)
# Modify include to only include the gen_names that pass the test
run_config.include = run_config.include.union(gen_names[start_idx:until_idx])
print(f"Only running generators: {gen_names[start_idx:until_idx]} due to start/until")
self._logger.debug(
f"Only running generators: {gen_names[start_idx:until_idx]} due to start/until"
)
with tqdm(total=len(sorted_generators), position=0, disable=not run_config.progress_bar) as tq:
for i, generator in enumerate(sorted_generators):
tq.set_description(generator.name)
gen_run = self.get_gen_run(generator)
gen_run.execute(main_engine, meta_engine, run_id, run_config, ordering=i)
tq.update()
# Complete run
with | Session(meta_engine) | sqlmodel.Session |
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objects related to the running of Models and Generators."""
from bdb import BdbQuit
from datetime import datetime, timedelta
from time import time
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple
from uuid import UUID
from psycopg import connect as pg3_connect
from pydantic.fields import Field, PrivateAttr
from pydasher import hasher
from sqlalchemy.future import Engine
from sqlmodel import Session, select
from tqdm import tqdm
import dbgen.exceptions as exceptions
from dbgen.core.base import Base, encoders
from dbgen.core.generator import Generator
from dbgen.core.metadata import (
GeneratorEntity,
GeneratorRunEntity,
GensToRun,
ModelEntity,
Repeats,
RunEntity,
Status,
)
from dbgen.core.model import Model
from dbgen.core.node.extract import Extract
from dbgen.core.node.query import BaseQuery
from dbgen.exceptions import DBgenExternalError, DBgenSkipException, RepeatException, SerializationError
from dbgen.utils.log import LogLevel
class RunConfig(Base):
"""Configuration for the running of a Generator and Model"""
retry: bool = False
include: Set[str] = Field(default_factory=set)
exclude: Set[str] = Field(default_factory=set)
start: Optional[str]
until: Optional[str]
batch_size: Optional[int]
progress_bar: bool = True
log_level: LogLevel = LogLevel.INFO
def should_gen_run(self, generator: Generator) -> bool:
"""Check a generator against include/exclude to see if it should run."""
markers = [generator.name, *generator.tags]
should_run = any(
map(
lambda x: (not self.include or x in self.include) and x not in self.exclude,
markers,
)
)
return should_run
def get_invalid_markers(self, model: Model) -> Dict[str, List[str]]:
"""Check that all inputs to RunConfig are meaningful for the model."""
invalid_marks = {}
gen_names = model.gens().keys()
# Validate start and until
for attr in ("start", "until"):
val: str = getattr(self, attr)
if val is not None and val not in gen_names:
invalid_marks[attr] = [val]
# Validate include and exclude as sets
for attr in ("include", "exclude"):
set_val: Set[str] = getattr(self, attr)
invalid_vals = [x for x in set_val if not model.validate_marker(x)]
if invalid_vals:
invalid_marks[attr] = invalid_vals
return invalid_marks
def update_run_by_id(run_id, status: Status, session: Session):
run = session.get(RunEntity, run_id)
assert run, f"No run found with id {run_id}"
run.status = status
session.commit()
class RunInitializer(Base):
"""Intializes a run by syncing the database and getting the run_id."""
def execute(self, engine: Engine, run_config: RunConfig) -> int:
# Use some metadatabase connection to initialize the run
# Store the details of the run on the metadatabase so downstream GeneratorRuns can pick them up
# Sync the database with the registries
with Session(engine) as session:
run = RunEntity(status=Status.initialized)
session.add(run)
session.commit()
session.refresh(run)
assert isinstance(run.id, int)
run.status = Status.running
session.commit()
run_id = run.id
return run_id
class BaseGeneratorRun(Base):
"""A lightwieght wrapper for the Generator that grabs a specific Generator from metadatabase and runs it."""
_old_repeats: Set[UUID] = PrivateAttr(default_factory=set)
_new_repeats: Set[UUID] = PrivateAttr(default_factory=set)
def get_gen(self, meta_engine: Engine, *args, **kwargs) -> Generator:
raise NotImplementedError
def execute(
self,
main_engine: Engine,
meta_engine: Engine,
run_id: Optional[int],
run_config: Optional[RunConfig],
ordering: Optional[int],
):
# Set default values for run_config if none provided
if run_config is None:
run_config = RunConfig()
generator = self.get_gen(meta_engine=meta_engine)
# Initialize the generator_row in the meta database
meta_session = Session(meta_engine)
gen_run = self._initialize_gen_run(
generator=generator, session=meta_session, run_id=run_id, ordering=ordering
)
# Check if our run config excludes our generator
if not run_config.should_gen_run(generator):
self._logger.info(f'Excluding generator {generator.name!r}')
gen_run.status = Status.excluded
meta_session.commit()
return
# Start the Generator
self._logger.info(f'Running generator {generator.name!r}...')
gen_run.status = Status.running
meta_session.commit()
start = time()
# Set the extractor
self._logger.debug('Initializing extractor')
extractor_connection = main_engine.connect()
extract = generator.extract
if isinstance(extract, BaseQuery):
extract.set_extractor(connection=extractor_connection)
else:
extract.set_extractor()
self._logger.debug('Fetching extractor length')
row_count = extract.length(connection=extractor_connection)
gen_run.inputs_extracted = row_count
meta_session.commit()
self._logger.debug('Fetching repeats')
# Query the repeats table for input_hashes that match this generator's hash
self._old_repeats = set(
meta_session.exec(select(Repeats.input_hash).where(Repeats.generator_id == generator.uuid)).all()
)
# The batch_size is set either on the run_config or the generator
batch_size = run_config.batch_size or generator.batch_size
assert batch_size is None or batch_size > 0, f"Invalid batch size; batch_size must be > 0: {batch_size}"
# Open raw connections for fast loading
main_raw_connection = pg3_connect(str(main_engine.url))
meta_raw_connection = meta_engine.raw_connection()
batch_done = lambda x: x % batch_size == 0 if batch_size is not None else False
# Start while loop to iterate through the nodes
self._logger.info('Looping through extracted rows...')
progress_bar = tqdm(
total=row_count,
position=1,
leave=False,
desc="Transforming...",
disable=not run_config.progress_bar,
)
try:
while True:
gen_run.inputs_processed += 1
row: Dict[str, Mapping[str, Any]] = {}
try:
for node in generator._sort_graph():
output = node.run(row)
# Extract outputs need to be fed to our repeat checker and need to be checked for stop iterations
if isinstance(node, Extract):
if output is None or batch_done(gen_run.inputs_processed):
self._logger.debug('loading batch...')
self._load_repeats(meta_raw_connection, generator)
rows_inserted, rows_updated = self._load(main_raw_connection, generator)
gen_run.rows_inserted += rows_inserted
gen_run.rows_updated += rows_updated
meta_session.commit()
self._logger.debug('done loading batch.')
self._logger.debug(f'inserted {rows_inserted} rows.')
self._logger.debug(f'updated {rows_updated} rows.')
# if we are out of rows break out of while loop
if output is None:
raise StopIteration
is_repeat, input_hash = self._check_repeat(output, generator.uuid)
if not run_config.retry and is_repeat:
raise RepeatException()
row[node.hash] = output # type: ignore
if not is_repeat:
self._new_repeats.add(input_hash)
gen_run.unique_inputs += 1
progress_bar.update()
# Stop iteration is used to catch the empty extractor
except StopIteration:
break
# A repeated input from the extract will also cause a row to be skipped
except RepeatException:
continue
# Any node can raise a skip exception to skip the input before loading
except DBgenSkipException as exc:
self._logger.debug(f"Skipped Row: {exc.msg}")
gen_run.inputs_skipped += 1
# External errors are raised whenever a node fails due to internal logic
except DBgenExternalError as e:
msg = f"\n\nError when running generator {generator.name}\n"
self._logger.error(msg)
self._logger.error(f"\n{e}")
gen_run.status = Status.failed
gen_run.error = str(e)
run = meta_session.get(RunEntity, run_id)
assert run
run.errors = run.errors + 1 if run.errors else 1
meta_session.commit()
meta_session.close()
return 2
except (
Exception,
KeyboardInterrupt,
SystemExit,
BdbQuit,
) as e:
gen_run.status = Status.failed
gen_run.error = (
f"Uncaught Error encountered during running of generator {generator.name}: {e!r}"
)
update_run_by_id(run_id, Status.failed, meta_session)
raise
# Close all connections
finally:
gen_run.runtime = round(time() - start, 3)
meta_session.commit()
main_raw_connection.close()
meta_raw_connection.close()
extractor_connection.close()
gen_run.status = Status.completed
gen_run.runtime = round(time() - start, 3)
self._logger.info(
f"Finished running generator {generator.name}({generator.uuid}) in {gen_run.runtime}(s)."
)
self._logger.info(f"Loaded approximately {gen_run.rows_inserted} rows")
meta_session.commit()
meta_session.close()
return 0
def _initialize_gen_run(
self,
session: Session,
generator: Generator,
run_id: Optional[int],
ordering: Optional[int],
) -> GeneratorRunEntity:
# if no run_id is provided create one and mark it as a testing run
if run_id is None:
run = RunEntity(status='testing')
session.add(run)
session.commit()
session.refresh(run)
ordering = 0
run_id = run.id
gen_row = generator._get_gen_row()
session.merge(gen_row)
session.commit()
query = generator.extract.query if isinstance(generator.extract, BaseQuery) else ''
gen_run = GeneratorRunEntity(
run_id=run_id,
generator_id=gen_row.id,
status=Status.initialized,
ordering=ordering,
query=query,
)
session.add(gen_run)
session.commit()
session.refresh(gen_run)
return gen_run
def _load(self, connection, generator: Generator) -> Tuple[int, int]:
rows_inserted = 0
rows_updated = 0
for load in generator._sorted_loads():
if load.insert:
rows_inserted += len(load._output)
else:
rows_updated += len(load._output)
load.load(connection, gen_id=self.uuid)
return (rows_inserted, rows_updated)
def _load_repeats(self, connection, generator: Generator) -> None:
rows = ((generator.uuid, input_hash) for input_hash in self._new_repeats)
Repeats._quick_load(connection, rows, column_names=["generator_id", "input_hash"])
self._old_repeats = self._old_repeats.union(self._new_repeats)
self._new_repeats = set()
def _check_repeat(self, extracted_dict: Dict[str, Any], generator_uuid: UUID) -> Tuple[bool, UUID]:
# Convert Row to a dictionary so we can hash it for repeat-checking
input_hash = UUID(hasher((generator_uuid, extracted_dict), encoders=encoders))
# If the input_hash has been seen and we don't have retry=True skip row
is_repeat = input_hash in self._old_repeats or input_hash in self._new_repeats
return (is_repeat, input_hash)
class GeneratorRun(BaseGeneratorRun):
generator: Generator
def get_gen(self, meta_engine: Engine, *args, **kwargs):
return self.generator
class RemoteGeneratorRun(BaseGeneratorRun):
generator_id: UUID
def get_gen(self, meta_engine, *args, **kwargs):
with Session(meta_engine) as sess:
gen_json = sess.exec(
select(GeneratorEntity.gen_json).where(GeneratorEntity.id == self.generator_id)
).one()
try:
generator = Generator.deserialize(gen_json)
except ModuleNotFoundError as exc:
import os
raise SerializationError(
f"While deserializing generator id {self.generator_id} an unknown module was encountered. Are you using custom dbgen objects reachable by your python environment? Make sure any custom extractors or code can be found in your PYTHONPATH environment variable\nError: {exc}\nPYTHONPATH={os.environ.get('PYTHONPATH')}"
) from exc
if generator.uuid != self.generator_id:
error = f"Deserialization Failed the generator hash has changed for generator named {generator.name}!\n{generator}\n{self.generator_id}"
raise exceptions.SerializationError(error)
return generator
class ModelRun(Base):
model: Model
def get_gen_run(self, generator: Generator) -> BaseGeneratorRun:
return GeneratorRun(generator=generator)
def execute(
self,
main_engine: Engine,
meta_engine: Engine,
run_config: Optional[RunConfig] = None,
nuke: bool = False,
rerun_failed: bool = False,
) -> RunEntity:
start = time()
if run_config is None:
run_config = RunConfig()
# Sync the database state with the model state
self.model.sync(main_engine, meta_engine, nuke=nuke)
# If rerunning failed generators, query for the gens to rerun and add them to the include set
if rerun_failed:
with meta_engine.connect() as conn:
result = conn.execute( | select(GensToRun.__table__.c.name) | sqlmodel.select |
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objects related to the running of Models and Generators."""
from bdb import BdbQuit
from datetime import datetime, timedelta
from time import time
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple
from uuid import UUID
from psycopg import connect as pg3_connect
from pydantic.fields import Field, PrivateAttr
from pydasher import hasher
from sqlalchemy.future import Engine
from sqlmodel import Session, select
from tqdm import tqdm
import dbgen.exceptions as exceptions
from dbgen.core.base import Base, encoders
from dbgen.core.generator import Generator
from dbgen.core.metadata import (
GeneratorEntity,
GeneratorRunEntity,
GensToRun,
ModelEntity,
Repeats,
RunEntity,
Status,
)
from dbgen.core.model import Model
from dbgen.core.node.extract import Extract
from dbgen.core.node.query import BaseQuery
from dbgen.exceptions import DBgenExternalError, DBgenSkipException, RepeatException, SerializationError
from dbgen.utils.log import LogLevel
class RunConfig(Base):
"""Configuration for the running of a Generator and Model"""
retry: bool = False
include: Set[str] = Field(default_factory=set)
exclude: Set[str] = Field(default_factory=set)
start: Optional[str]
until: Optional[str]
batch_size: Optional[int]
progress_bar: bool = True
log_level: LogLevel = LogLevel.INFO
def should_gen_run(self, generator: Generator) -> bool:
"""Check a generator against include/exclude to see if it should run."""
markers = [generator.name, *generator.tags]
should_run = any(
map(
lambda x: (not self.include or x in self.include) and x not in self.exclude,
markers,
)
)
return should_run
def get_invalid_markers(self, model: Model) -> Dict[str, List[str]]:
"""Check that all inputs to RunConfig are meaningful for the model."""
invalid_marks = {}
gen_names = model.gens().keys()
# Validate start and until
for attr in ("start", "until"):
val: str = getattr(self, attr)
if val is not None and val not in gen_names:
invalid_marks[attr] = [val]
# Validate include and exclude as sets
for attr in ("include", "exclude"):
set_val: Set[str] = getattr(self, attr)
invalid_vals = [x for x in set_val if not model.validate_marker(x)]
if invalid_vals:
invalid_marks[attr] = invalid_vals
return invalid_marks
def update_run_by_id(run_id, status: Status, session: Session):
run = session.get(RunEntity, run_id)
assert run, f"No run found with id {run_id}"
run.status = status
session.commit()
class RunInitializer(Base):
"""Intializes a run by syncing the database and getting the run_id."""
def execute(self, engine: Engine, run_config: RunConfig) -> int:
# Use some metadatabase connection to initialize the run
# Store the details of the run on the metadatabase so downstream GeneratorRuns can pick them up
# Sync the database with the registries
with Session(engine) as session:
run = RunEntity(status=Status.initialized)
session.add(run)
session.commit()
session.refresh(run)
assert isinstance(run.id, int)
run.status = Status.running
session.commit()
run_id = run.id
return run_id
class BaseGeneratorRun(Base):
"""A lightwieght wrapper for the Generator that grabs a specific Generator from metadatabase and runs it."""
_old_repeats: Set[UUID] = PrivateAttr(default_factory=set)
_new_repeats: Set[UUID] = PrivateAttr(default_factory=set)
def get_gen(self, meta_engine: Engine, *args, **kwargs) -> Generator:
raise NotImplementedError
def execute(
self,
main_engine: Engine,
meta_engine: Engine,
run_id: Optional[int],
run_config: Optional[RunConfig],
ordering: Optional[int],
):
# Set default values for run_config if none provided
if run_config is None:
run_config = RunConfig()
generator = self.get_gen(meta_engine=meta_engine)
# Initialize the generator_row in the meta database
meta_session = Session(meta_engine)
gen_run = self._initialize_gen_run(
generator=generator, session=meta_session, run_id=run_id, ordering=ordering
)
# Check if our run config excludes our generator
if not run_config.should_gen_run(generator):
self._logger.info(f'Excluding generator {generator.name!r}')
gen_run.status = Status.excluded
meta_session.commit()
return
# Start the Generator
self._logger.info(f'Running generator {generator.name!r}...')
gen_run.status = Status.running
meta_session.commit()
start = time()
# Set the extractor
self._logger.debug('Initializing extractor')
extractor_connection = main_engine.connect()
extract = generator.extract
if isinstance(extract, BaseQuery):
extract.set_extractor(connection=extractor_connection)
else:
extract.set_extractor()
self._logger.debug('Fetching extractor length')
row_count = extract.length(connection=extractor_connection)
gen_run.inputs_extracted = row_count
meta_session.commit()
self._logger.debug('Fetching repeats')
# Query the repeats table for input_hashes that match this generator's hash
self._old_repeats = set(
meta_session.exec( | select(Repeats.input_hash) | sqlmodel.select |
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objects related to the running of Models and Generators."""
from bdb import BdbQuit
from datetime import datetime, timedelta
from time import time
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple
from uuid import UUID
from psycopg import connect as pg3_connect
from pydantic.fields import Field, PrivateAttr
from pydasher import hasher
from sqlalchemy.future import Engine
from sqlmodel import Session, select
from tqdm import tqdm
import dbgen.exceptions as exceptions
from dbgen.core.base import Base, encoders
from dbgen.core.generator import Generator
from dbgen.core.metadata import (
GeneratorEntity,
GeneratorRunEntity,
GensToRun,
ModelEntity,
Repeats,
RunEntity,
Status,
)
from dbgen.core.model import Model
from dbgen.core.node.extract import Extract
from dbgen.core.node.query import BaseQuery
from dbgen.exceptions import DBgenExternalError, DBgenSkipException, RepeatException, SerializationError
from dbgen.utils.log import LogLevel
class RunConfig(Base):
"""Configuration for the running of a Generator and Model"""
retry: bool = False
include: Set[str] = Field(default_factory=set)
exclude: Set[str] = Field(default_factory=set)
start: Optional[str]
until: Optional[str]
batch_size: Optional[int]
progress_bar: bool = True
log_level: LogLevel = LogLevel.INFO
def should_gen_run(self, generator: Generator) -> bool:
"""Check a generator against include/exclude to see if it should run."""
markers = [generator.name, *generator.tags]
should_run = any(
map(
lambda x: (not self.include or x in self.include) and x not in self.exclude,
markers,
)
)
return should_run
def get_invalid_markers(self, model: Model) -> Dict[str, List[str]]:
"""Check that all inputs to RunConfig are meaningful for the model."""
invalid_marks = {}
gen_names = model.gens().keys()
# Validate start and until
for attr in ("start", "until"):
val: str = getattr(self, attr)
if val is not None and val not in gen_names:
invalid_marks[attr] = [val]
# Validate include and exclude as sets
for attr in ("include", "exclude"):
set_val: Set[str] = getattr(self, attr)
invalid_vals = [x for x in set_val if not model.validate_marker(x)]
if invalid_vals:
invalid_marks[attr] = invalid_vals
return invalid_marks
def update_run_by_id(run_id, status: Status, session: Session):
run = session.get(RunEntity, run_id)
assert run, f"No run found with id {run_id}"
run.status = status
session.commit()
class RunInitializer(Base):
"""Intializes a run by syncing the database and getting the run_id."""
def execute(self, engine: Engine, run_config: RunConfig) -> int:
# Use some metadatabase connection to initialize the run
# Store the details of the run on the metadatabase so downstream GeneratorRuns can pick them up
# Sync the database with the registries
with Session(engine) as session:
run = RunEntity(status=Status.initialized)
session.add(run)
session.commit()
session.refresh(run)
assert isinstance(run.id, int)
run.status = Status.running
session.commit()
run_id = run.id
return run_id
class BaseGeneratorRun(Base):
"""A lightwieght wrapper for the Generator that grabs a specific Generator from metadatabase and runs it."""
_old_repeats: Set[UUID] = PrivateAttr(default_factory=set)
_new_repeats: Set[UUID] = PrivateAttr(default_factory=set)
def get_gen(self, meta_engine: Engine, *args, **kwargs) -> Generator:
raise NotImplementedError
def execute(
self,
main_engine: Engine,
meta_engine: Engine,
run_id: Optional[int],
run_config: Optional[RunConfig],
ordering: Optional[int],
):
# Set default values for run_config if none provided
if run_config is None:
run_config = RunConfig()
generator = self.get_gen(meta_engine=meta_engine)
# Initialize the generator_row in the meta database
meta_session = Session(meta_engine)
gen_run = self._initialize_gen_run(
generator=generator, session=meta_session, run_id=run_id, ordering=ordering
)
# Check if our run config excludes our generator
if not run_config.should_gen_run(generator):
self._logger.info(f'Excluding generator {generator.name!r}')
gen_run.status = Status.excluded
meta_session.commit()
return
# Start the Generator
self._logger.info(f'Running generator {generator.name!r}...')
gen_run.status = Status.running
meta_session.commit()
start = time()
# Set the extractor
self._logger.debug('Initializing extractor')
extractor_connection = main_engine.connect()
extract = generator.extract
if isinstance(extract, BaseQuery):
extract.set_extractor(connection=extractor_connection)
else:
extract.set_extractor()
self._logger.debug('Fetching extractor length')
row_count = extract.length(connection=extractor_connection)
gen_run.inputs_extracted = row_count
meta_session.commit()
self._logger.debug('Fetching repeats')
# Query the repeats table for input_hashes that match this generator's hash
self._old_repeats = set(
meta_session.exec(select(Repeats.input_hash).where(Repeats.generator_id == generator.uuid)).all()
)
# The batch_size is set either on the run_config or the generator
batch_size = run_config.batch_size or generator.batch_size
assert batch_size is None or batch_size > 0, f"Invalid batch size batch_size must be >0: {batch_size}"
# Open raw connections for fast loading
main_raw_connection = pg3_connect(str(main_engine.url))
meta_raw_connection = meta_engine.raw_connection()
batch_done = lambda x: x % batch_size == 0 if batch_size is not None else False
# Start while loop to iterate through the nodes
self._logger.info('Looping through extracted rows...')
progress_bar = tqdm(
total=row_count,
position=1,
leave=False,
desc="Transforming...",
disable=not run_config.progress_bar,
)
try:
while True:
gen_run.inputs_processed += 1
row: Dict[str, Mapping[str, Any]] = {}
try:
for node in generator._sort_graph():
output = node.run(row)
# Extract outputs need to be fed to the repeat checker and checked for StopIteration
if isinstance(node, Extract):
if output is None or batch_done(gen_run.inputs_processed):
self._logger.debug('loading batch...')
self._load_repeats(meta_raw_connection, generator)
rows_inserted, rows_updated = self._load(main_raw_connection, generator)
gen_run.rows_inserted += rows_inserted
gen_run.rows_updated += rows_updated
meta_session.commit()
self._logger.debug('done loading batch.')
self._logger.debug(f'inserted {rows_inserted} rows.')
self._logger.debug(f'updated {rows_updated} rows.')
# if we are out of rows break out of while loop
if output is None:
raise StopIteration
is_repeat, input_hash = self._check_repeat(output, generator.uuid)
if not run_config.retry and is_repeat:
raise RepeatException()
row[node.hash] = output # type: ignore
if not is_repeat:
self._new_repeats.add(input_hash)
gen_run.unique_inputs += 1
progress_bar.update()
# StopIteration is used to catch an exhausted extractor
except StopIteration:
break
# A repeated input from the extract will also cause a row to be skipped
except RepeatException:
continue
# Any node can raise a skip exception to skip the input before loading
except DBgenSkipException as exc:
self._logger.debug(f"Skipped Row: {exc.msg}")
gen_run.inputs_skipped += 1
# External errors are raised whenever a node fails due to internal logic
except DBgenExternalError as e:
msg = f"\n\nError when running generator {generator.name}\n"
self._logger.error(msg)
self._logger.error(f"\n{e}")
gen_run.status = Status.failed
gen_run.error = str(e)
run = meta_session.get(RunEntity, run_id)
assert run
run.errors = run.errors + 1 if run.errors else 1
meta_session.commit()
meta_session.close()
return 2
except (
Exception,
KeyboardInterrupt,
SystemExit,
BdbQuit,
) as e:
gen_run.status = Status.failed
gen_run.error = (
f"Uncaught Error encountered during running of generator {generator.name}: {e!r}"
)
update_run_by_id(run_id, Status.failed, meta_session)
raise
# Close all connections
finally:
gen_run.runtime = round(time() - start, 3)
meta_session.commit()
main_raw_connection.close()
meta_raw_connection.close()
extractor_connection.close()
gen_run.status = Status.completed
gen_run.runtime = round(time() - start, 3)
self._logger.info(
f"Finished running generator {generator.name}({generator.uuid}) in {gen_run.runtime}(s)."
)
self._logger.info(f"Loaded approximately {gen_run.rows_inserted} rows")
meta_session.commit()
meta_session.close()
return 0
def _initialize_gen_run(
self,
session: Session,
generator: Generator,
run_id: Optional[int],
ordering: Optional[int],
) -> GeneratorRunEntity:
# if no run_id is provided create one and mark it as a testing run
if run_id is None:
run = RunEntity(status='testing')
session.add(run)
session.commit()
session.refresh(run)
ordering = 0
run_id = run.id
gen_row = generator._get_gen_row()
session.merge(gen_row)
session.commit()
query = generator.extract.query if isinstance(generator.extract, BaseQuery) else ''
gen_run = GeneratorRunEntity(
run_id=run_id,
generator_id=gen_row.id,
status=Status.initialized,
ordering=ordering,
query=query,
)
session.add(gen_run)
session.commit()
session.refresh(gen_run)
return gen_run
def _load(self, connection, generator: Generator) -> Tuple[int, int]:
rows_inserted = 0
rows_updated = 0
for load in generator._sorted_loads():
if load.insert:
rows_inserted += len(load._output)
else:
rows_updated += len(load._output)
load.load(connection, gen_id=self.uuid)
return (rows_inserted, rows_updated)
def _load_repeats(self, connection, generator: Generator) -> None:
rows = ((generator.uuid, input_hash) for input_hash in self._new_repeats)
Repeats._quick_load(connection, rows, column_names=["generator_id", "input_hash"])
self._old_repeats = self._old_repeats.union(self._new_repeats)
self._new_repeats = set()
def _check_repeat(self, extracted_dict: Dict[str, Any], generator_uuid: UUID) -> Tuple[bool, UUID]:
# Convert Row to a dictionary so we can hash it for repeat-checking
input_hash = UUID(hasher((generator_uuid, extracted_dict), encoders=encoders))
# If the input_hash has been seen and we don't have retry=True skip row
is_repeat = input_hash in self._old_repeats or input_hash in self._new_repeats
return (is_repeat, input_hash)
class GeneratorRun(BaseGeneratorRun):
generator: Generator
def get_gen(self, meta_engine: Engine, *args, **kwargs):
return self.generator
class RemoteGeneratorRun(BaseGeneratorRun):
generator_id: UUID
def get_gen(self, meta_engine, *args, **kwargs):
with Session(meta_engine) as sess:
gen_json = sess.exec(
| select(GeneratorEntity.gen_json) | sqlmodel.select |
from create_db import Student
from sqlmodel import Session, create_engine, select
sqlite_url = "sqlite:///school.db"
engine = | create_engine(sqlite_url, echo=True) | sqlmodel.create_engine |
from create_db import Student
from sqlmodel import Session, create_engine, select
sqlite_url = "sqlite:///school.db"
engine = create_engine(sqlite_url, echo=True)
# Read database
with | Session(engine) | sqlmodel.Session |
from create_db import Student
from sqlmodel import Session, create_engine, select
sqlite_url = "sqlite:///school.db"
engine = create_engine(sqlite_url, echo=True)
# Read database
with Session(engine) as session:
statement = | select(Student) | sqlmodel.select |
from sqlmodel import create_engine, Session
from sqlmodel.main import SQLModel
from core.config import settings
engine = | create_engine(settings.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True) | sqlmodel.create_engine |
from sqlmodel import create_engine, Session
from sqlmodel.main import SQLModel
from core.config import settings
engine = create_engine(settings.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True)
def init_db():
| SQLModel.metadata.create_all(engine) | sqlmodel.main.SQLModel.metadata.create_all |
from sqlmodel import create_engine, Session
from sqlmodel.main import SQLModel
from core.config import settings
engine = create_engine(settings.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True)
def init_db():
SQLModel.metadata.create_all(engine)
def get_session():
with | Session(engine) | sqlmodel.Session |
from typing import Union
from fastapi import Request
from fastapi.param_functions import Depends, Header
from fastapi.security import OAuth2PasswordBearer
from sqlmodel import Session, select
from ..core.constants import AccessLevel, ContextEnum
from ..core.helpers.database import make_session
from ..core.helpers.exceptions import NotAuthorizedError
from ..core.models import Context, ParsedToken, User
from ..core.security import load_jwt_token
reusable_oauth2 = OAuth2PasswordBearer(tokenUrl="api/v1/auth/access-token")
def get_string_token(token: str = Header(None, alias="Authorization")) -> Union[None, str]:
if token:
_, _, token = token.partition(" ")
return token
def load_access_token(request: Request, token: str = Depends(reusable_oauth2)) -> ParsedToken:
return load_jwt_token(token)
async def get_current_user(
session: Session = Depends(make_session), token: ParsedToken = Depends(load_access_token)
) -> User:
user = session.exec( | select(User) | sqlmodel.select |
from typing import Optional
from sqlmodel import Field, SQLModel, create_engine
class Student(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
first_name: str
last_name: str
email: str
# dob:
sqlite_url = "sqlite:///school.db"
engine = | create_engine(sqlite_url) | sqlmodel.create_engine |
from typing import Optional
from sqlmodel import Field, SQLModel, create_engine
class Student(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
first_name: str
last_name: str
email: str
# dob:
sqlite_url = "sqlite:///school.db"
engine = create_engine(sqlite_url)
| SQLModel.metadata.create_all(engine) | sqlmodel.SQLModel.metadata.create_all |
from typing import Optional
from sqlmodel import Field, SQLModel, create_engine
class Student(SQLModel, table=True):
id: Optional[int] = | Field(default=None, primary_key=True) | sqlmodel.Field |
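# Hedged sketch (assumes the full Student fields shown earlier and the school.db
# engine; the row values are only examples): create the table and insert one row.
from sqlmodel import Session

SQLModel.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Student(first_name="Ada", last_name="Lovelace", email="ada@example.com"))
    session.commit()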
import os
from dotenv import load_dotenv
from dateutil.parser import parse
from sqlmodel import Session, select, SQLModel, create_engine
import requests
from youtube.models import YouTube
load_dotenv()
YT_CHANNEL = os.environ["YT_CHANNEL"]
YOUTUBE_API_KEY = os.environ["YOUTUBE_API_KEY"]
DATABASE_URL = os.environ["DATABASE_URL"]
YOUTUBE_VIDEO = "youtube#video"
BASE_URL = (
"https://www.googleapis.com/youtube/v3/search?key={key}"
"&channelId={channel}&part=snippet,id&order=date&maxResults=20"
)
engine = | create_engine(DATABASE_URL, echo=False) | sqlmodel.create_engine |
import os
from dotenv import load_dotenv
from dateutil.parser import parse
from sqlmodel import Session, select, SQLModel, create_engine
import requests
from youtube.models import YouTube
load_dotenv()
YT_CHANNEL = os.environ["YT_CHANNEL"]
YOUTUBE_API_KEY = os.environ["YOUTUBE_API_KEY"]
DATABASE_URL = os.environ["DATABASE_URL"]
YOUTUBE_VIDEO = "youtube#video"
BASE_URL = (
"https://www.googleapis.com/youtube/v3/search?key={key}"
"&channelId={channel}&part=snippet,id&order=date&maxResults=20"
)
engine = create_engine(DATABASE_URL, echo=False)
def get_session():
with Session(engine) as session:
yield session
def create_db_and_tables():
| SQLModel.metadata.create_all(engine) | sqlmodel.SQLModel.metadata.create_all |
import os
from dotenv import load_dotenv
from dateutil.parser import parse
from sqlmodel import Session, select, SQLModel, create_engine
import requests
from youtube.models import YouTube
load_dotenv()
YT_CHANNEL = os.environ["YT_CHANNEL"]
YOUTUBE_API_KEY = os.environ["YOUTUBE_API_KEY"]
DATABASE_URL = os.environ["DATABASE_URL"]
YOUTUBE_VIDEO = "youtube#video"
BASE_URL = (
"https://www.googleapis.com/youtube/v3/search?key={key}"
"&channelId={channel}&part=snippet,id&order=date&maxResults=20"
)
engine = create_engine(DATABASE_URL, echo=False)
def get_session():
with Session(engine) as session:
yield session
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def get_videos_from_channel(channel: str = YT_CHANNEL) -> list[dict]:
base_url = BASE_URL.format(key=YOUTUBE_API_KEY,
channel=channel)
next_page, url = None, base_url
videos = []
while True:
if next_page is not None:
url = base_url + f"&pageToken={next_page}"
response = requests.get(url).json()
for vid in response["items"]:
if vid["id"]["kind"] != "youtube#video":
continue
videos.append(vid)
if "nextPageToken" not in response:
break
next_page = response["nextPageToken"]
return videos
def insert_youtube_videos(session: Session, videos: list[dict]) -> None:
num_inserted = 0
for video in videos:
video_id = video["id"]["videoId"]
title = video["snippet"]["title"]
description = video["snippet"]["description"]
thumb = video["snippet"]["thumbnails"]["medium"]["url"]
published = video["snippet"]["publishTime"]
statement = select(YouTube).where(YouTube.video_id == video_id)
results = session.exec(statement)
if results.first() is not None:
continue
youtube = YouTube(
video_id=video_id,
title=title,
description=description,
thumb=thumb,
published=parse(published),
)
session.add(youtube)
num_inserted += 1
session.commit()
statement = | select(YouTube) | sqlmodel.select |
import os
from dotenv import load_dotenv
from dateutil.parser import parse
from sqlmodel import Session, select, SQLModel, create_engine
import requests
from youtube.models import YouTube
load_dotenv()
YT_CHANNEL = os.environ["YT_CHANNEL"]
YOUTUBE_API_KEY = os.environ["YOUTUBE_API_KEY"]
DATABASE_URL = os.environ["DATABASE_URL"]
YOUTUBE_VIDEO = "youtube#video"
BASE_URL = (
"https://www.googleapis.com/youtube/v3/search?key={key}"
"&channelId={channel}&part=snippet,id&order=date&maxResults=20"
)
engine = create_engine(DATABASE_URL, echo=False)
def get_session():
with | Session(engine) | sqlmodel.Session |
import os
from dotenv import load_dotenv
from dateutil.parser import parse
from sqlmodel import Session, select, SQLModel, create_engine
import requests
from youtube.models import YouTube
load_dotenv()
YT_CHANNEL = os.environ["YT_CHANNEL"]
YOUTUBE_API_KEY = os.environ["YOUTUBE_API_KEY"]
DATABASE_URL = os.environ["DATABASE_URL"]
YOUTUBE_VIDEO = "youtube#video"
BASE_URL = (
"https://www.googleapis.com/youtube/v3/search?key={key}"
"&channelId={channel}&part=snippet,id&order=date&maxResults=20"
)
engine = create_engine(DATABASE_URL, echo=False)
def get_session():
with Session(engine) as session:
yield session
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def get_videos_from_channel(channel: str = YT_CHANNEL) -> list[dict]:
base_url = BASE_URL.format(key=YOUTUBE_API_KEY,
channel=channel)
next_page, url = None, base_url
videos = []
while True:
if next_page is not None:
url = base_url + f"&pageToken={next_page}"
response = requests.get(url).json()
for vid in response["items"]:
if vid["id"]["kind"] != "youtube#video":
continue
videos.append(vid)
if "nextPageToken" not in response:
break
next_page = response["nextPageToken"]
return videos
def insert_youtube_videos(session: Session, videos: list[dict]) -> None:
num_inserted = 0
for video in videos:
video_id = video["id"]["videoId"]
title = video["snippet"]["title"]
description = video["snippet"]["description"]
thumb = video["snippet"]["thumbnails"]["medium"]["url"]
published = video["snippet"]["publishTime"]
statement = select(YouTube).where(YouTube.video_id == video_id)
results = session.exec(statement)
if results.first() is not None:
continue
youtube = YouTube(
video_id=video_id,
title=title,
description=description,
thumb=thumb,
published=parse(published),
)
session.add(youtube)
num_inserted += 1
session.commit()
statement = select(YouTube)
results = session.exec(statement)
total_records = len(results.all())
print(f"Total records: {total_records} (newly inserted: {num_inserted})")
if __name__ == "__main__":
create_db_and_tables()
videos = get_videos_from_channel()
with | Session(engine) | sqlmodel.Session |
import os
from dotenv import load_dotenv
from dateutil.parser import parse
from sqlmodel import Session, select, SQLModel, create_engine
import requests
from youtube.models import YouTube
load_dotenv()
YT_CHANNEL = os.environ["YT_CHANNEL"]
YOUTUBE_API_KEY = os.environ["YOUTUBE_API_KEY"]
DATABASE_URL = os.environ["DATABASE_URL"]
YOUTUBE_VIDEO = "youtube#video"
BASE_URL = (
"https://www.googleapis.com/youtube/v3/search?key={key}"
"&channelId={channel}&part=snippet,id&order=date&maxResults=20"
)
engine = create_engine(DATABASE_URL, echo=False)
def get_session():
with Session(engine) as session:
yield session
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def get_videos_from_channel(channel: str = YT_CHANNEL) -> list[dict]:
base_url = BASE_URL.format(key=YOUTUBE_API_KEY,
channel=channel)
next_page, url = None, base_url
videos = []
while True:
if next_page is not None:
url = base_url + f"&pageToken={next_page}"
response = requests.get(url).json()
for vid in response["items"]:
if vid["id"]["kind"] != "youtube#video":
continue
videos.append(vid)
if "nextPageToken" not in response:
break
next_page = response["nextPageToken"]
return videos
def insert_youtube_videos(session: Session, videos: list[dict]) -> None:
num_inserted = 0
for video in videos:
video_id = video["id"]["videoId"]
title = video["snippet"]["title"]
description = video["snippet"]["description"]
thumb = video["snippet"]["thumbnails"]["medium"]["url"]
published = video["snippet"]["publishTime"]
statement = | select(YouTube) | sqlmodel.select |
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryEntPlasticConference(SQLModel, table=True):
id: Optional[int] = | Field(default=None, primary_key=True) | sqlmodel.Field |
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryEntPlasticConference(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id_order: int
history_id_conference: int
ent_plastic_conference_id: int
state: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class EntPlasticConference(SQLModel, table=True):
id: Optional[int] = | Field(default=None, primary_key=True) | sqlmodel.Field |
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryEntPlasticConference(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id_order: int
history_id_conference: int
ent_plastic_conference_id: int
state: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class EntPlasticConference(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
problem: str
question: str
ent_plan: str
surgeon_plant: str
post_plan: str
surgeon_post_plan: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class EntPlasticConferenceDoctorMap(SQLModel, table=True):
id: Optional[int] = | Field(default=None, primary_key=True) | sqlmodel.Field |
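# Hedged sketch (the route path is illustrative; assumes get_session yields an
# AsyncSession): a list endpoint for the conference table using the async API.
@router.get("/ent-plastic-conferences")
async def list_ent_plastic_conferences(session: AsyncSession = Depends(get_session)):
    result = await session.execute(select(EntPlasticConference))
    return result.scalars().all()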
from fastapi.exceptions import HTTPException
import pytest
from sqlmodel import select, Session
from sqlmodel.main import SQLModel
from sfm.routes.projects import crud
from tests.conftest import hashed_token1, hashed_token2
from sfm.models import Project, ProjectCreate, ProjectUpdate
# get_all
def test_get_all(db, session: Session):
"""test that the crud function works as expected"""
response = crud.get_all(db)
assert response is not None
assert response[0].name == "Test Project 1"
assert response[0].lead_name == "<NAME>"
assert response[0].lead_email == "<EMAIL>"
assert response[0].description == "A test project for testing"
assert response[0].location == "Strangeville"
assert response[0].repo_url == "github.com/starkEnterprises"
assert response[0].on_prem is False
assert response[0].project_auth_token_hashed == hashed_token1
assert response[1].name == "Test Project 2"
assert response[1].lead_name == "<NAME>"
assert response[1].lead_email == "<EMAIL>"
assert response[1].description == "A second test project for testing"
assert response[1].location == "Kohler"
assert response[1].repo_url == "github.com/pgaGolf"
assert response[1].on_prem is False
assert response[1].project_auth_token_hashed == hashed_token2
"""
Test that the function raises an error when there are
no projects in the table
"""
session.delete(session.get(Project, 1))
session.delete(session.get(Project, 2))
session.commit()
# SQLModel.metadata.drop_all(engine)
with pytest.raises(Exception) as ex:
crud.get_all(db)
assert ex.value.message == "Projects not found"
# get_by_id
def test_get_by_id(db):
"""test that the crud function works as expected"""
response = crud.get_by_id(db, project_id=1)
assert response is not None
assert response.name == "Test Project 1"
assert response.lead_name == "<NAME>"
assert response.lead_email == "<EMAIL>"
assert response.description == "A test project for testing"
assert response.location == "Strangeville"
assert response.repo_url == "github.com/starkEnterprises"
assert response.on_prem is False
assert response.project_auth_token_hashed == hashed_token1
"""
Testing that the crud function raises exception when the project
does with matching id does not exist in DB
"""
with pytest.raises(Exception) as ex:
crud.get_by_id(db, project_id=15)
assert ex.value.message == "Project not found"
# create_project
def test_create(db):
"""Testing that the project works as expected"""
project_data = ProjectCreate(
**{
"name": "Test Project 3",
"lead_name": "<NAME>",
"lead_email": "<EMAIL>",
"description": "A test project for testing creation",
"location": "Bikini Gotham",
"repo_url": "github.com/crustyEnterprises",
"on_prem": True,
}
)
response = crud.create_project(db, project_data, admin_key="admin_key")
assert len(response) == 2
assert response[0].name == "Test Project 3"
assert response[0].lead_name == "<NAME>"
assert response[0].lead_email == "<EMAIL>"
assert response[0].description == "A test project for testing creation"
assert response[0].location == "Bikini Gotham"
assert response[0].repo_url == "github.com/crustyEnterprises"
assert response[0].on_prem is True
assert response[0].project_auth_token_hashed is not None
"""
Testing that the function raises an error if the project name already
exists in the database
"""
with pytest.raises(Exception) as ex:
response = crud.create_project(db, project_data, admin_key="admin_key")
assert ex.value.message == "Database entry already exists"
"""
Testing that the project raises an exception when the admin_key
is incorrect
"""
with pytest.raises(Exception) as ex:
crud.create_project(db, project_data, admin_key="Shmadmin_key")
assert ex.value.message == "Credentials are incorrect"
# delete_project
def test_delete_project(db):
"""Testing that the crud function works as expected"""
response = crud.delete_project(db, project_id=1, admin_key="admin_key")
assert response is True
projects = db.exec( | select(Project) | sqlmodel.select |
from typing import List, Union
from fastapi import APIRouter, Request
from fastapi.exceptions import HTTPException
from sqlmodel import Session, or_, select
from ..db import ActiveSession
from ..security import (
AdminUser,
AuthenticatedUser,
HashedPassword,
User,
UserCreate,
UserPasswordPatch,
UserResponse,
get_current_user,
)
router = APIRouter()
@router.get("/", response_model=List[UserResponse], dependencies=[AdminUser])
async def list_users(*, session: Session = ActiveSession):
users = session.exec( | select(User) | sqlmodel.select |
"""init
Revision ID: f9c634db477d
Revises:
Create Date: 2021-09-10 00:24:32.718895
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = 'f9c634db477d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('song',
sa.Column('name', | sqlmodel.sql.sqltypes.AutoString() | sqlmodel.sql.sqltypes.AutoString |
"""init
Revision ID: f9c634db477d
Revises:
Create Date: 2021-09-10 00:24:32.718895
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = 'f9c634db477d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('song',
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('artist', | sqlmodel.sql.sqltypes.AutoString() | sqlmodel.sql.sqltypes.AutoString |
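# Hedged sketch (assumes only the 'song' table is created in the truncated
# upgrade above): the matching downgrade for this migration.
def downgrade():
    op.drop_table('song')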
import asyncio
import strawberry
from sqlmodel import Session, select
from strawberry.types import Info
from fastapi_server.models.user import User
from fastapi_server.routes.graph_ql.broadcaster import Broadcast
broadcast = Broadcast()
@strawberry.type
class UserSystemQuery:
@strawberry.field
def user_login(self, info: Info, email: str, password: str) -> str:
# TODO Replace with actual password hash function
session: Session = info.context['session']
statement = | select(User) | sqlmodel.select |
import asyncio
import strawberry
from sqlmodel import Session, select
from strawberry.types import Info
from fastapi_server.models.user import User
from fastapi_server.routes.graph_ql.broadcaster import Broadcast
broadcast = Broadcast()
@strawberry.type
class UserSystemQuery:
@strawberry.field
def user_login(self, info: Info, email: str, password: str) -> str:
# TODO Replace with actual password hash function
session: Session = info.context['session']
statement = select(User).where(User.email == email, User.password_hashed == password)
user = session.exec(statement).first()
if user is None:
raise FileNotFoundError('Email and password do not match')
return f'Login successful for {email}'
@strawberry.type
class UserSystemMutation:
@strawberry.mutation
def user_register(self, info: Info, username: str, email: str, password: str, password_repeated: str) -> bool:
if password != password_repeated:
raise KeyError('not same pw')
# TODO Replace with actual password hash function
password_hashed = hash(password)
session: Session = info.context['session']
username_taken = session.exec( | select(User) | sqlmodel.select |
import asyncio
import strawberry
from sqlmodel import Session, select
from strawberry.types import Info
from fastapi_server.models.user import User
from fastapi_server.routes.graph_ql.broadcaster import Broadcast
broadcast = Broadcast()
@strawberry.type
class UserSystemQuery:
@strawberry.field
def user_login(self, info: Info, email: str, password: str) -> str:
# TODO Replace with actual password hash function
session: Session = info.context['session']
statement = select(User).where(User.email == email, User.password_hashed == password)
user = session.exec(statement).first()
if user is None:
raise FileNotFoundError('Email and password do not match')
return f'Login successful for {email}'
@strawberry.type
class UserSystemMutation:
@strawberry.mutation
def user_register(self, info: Info, username: str, email: str, password: str, password_repeated: str) -> bool:
if password != password_repeated:
raise KeyError('not same pw')
# TODO Replace with actual password hash function
password_hashed = hash(password)
session: Session = info.context['session']
username_taken = session.exec(select(User).where(User.username == username)).first()
if username_taken is not None:
raise KeyError('username taken')
email_taken = session.exec( | select(User) | sqlmodel.select |
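# Hedged sketch (the GraphQLRouter wiring is an assumption, not shown in the
# snippets above): combining the query and mutation types into a schema.
import strawberry
from strawberry.fastapi import GraphQLRouter

schema = strawberry.Schema(query=UserSystemQuery, mutation=UserSystemMutation)
graphql_router = GraphQLRouter(schema)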
"""
Functions for building queries, from nodes or SQL.
"""
import ast
import datetime
import operator
import re
from typing import Any, Callable, Dict, List, Literal, Optional, Set, Tuple, cast
from dateutil.parser import parse
from sqlalchemy.engine.url import make_url
from sqlalchemy.schema import Column as SqlaColumn
from sqlalchemy.sql.elements import BinaryExpression
from sqlalchemy.sql.expression import ClauseElement
from sqlmodel import Session, select
from sqloxide import parse_sql
from datajunction.constants import DEFAULT_DIMENSION_COLUMN
from datajunction.errors import DJError, DJInvalidInputException, ErrorCode
from datajunction.models.node import Node, NodeType
from datajunction.models.query import QueryCreate
from datajunction.sql.dag import (
get_database_for_nodes,
get_dimensions,
get_referenced_columns_from_sql,
get_referenced_columns_from_tree,
)
from datajunction.sql.parse import (
find_nodes_by_key,
find_nodes_by_key_with_parent,
get_expression_from_projection,
)
from datajunction.sql.transpile import get_query, get_select_for_node
from datajunction.typing import (
Expression,
Identifier,
Join,
Projection,
Relation,
Select,
)
from datajunction.utils import get_session
FILTER_RE = re.compile(r"([\w\./_]+)(<=|<|>=|>|!=|=)(.+)")
FilterOperator = Literal[">", ">=", "<", "<=", "=", "!="]
COMPARISONS: Dict[FilterOperator, Callable[[Any, Any], bool]] = {
">": operator.gt,
">=": operator.ge,
"<": operator.lt,
"<=": operator.le,
"=": operator.eq,
"!=": operator.ne,
}
def parse_filter(filter_: str) -> Tuple[str, FilterOperator, str]:
"""
Parse a filter into name, op, value.
"""
match = FILTER_RE.match(filter_)
if not match:
raise DJInvalidInputException(
message=f'The filter "{filter_}" is invalid',
errors=[
DJError(
code=ErrorCode.INVALID_FILTER_PATTERN,
message=(
f'The filter "{filter_}" is not a valid filter. Filters should '
"consist of a dimension name, follow by a valid operator "
"(<=|<|>=|>|!=|=), followed by a value. If the value is a "
"string or date/time it should be enclosed in single quotes."
),
debug={"context": {"filter": filter_}},
),
],
)
name, operator_, value = match.groups()
operator_ = cast(FilterOperator, operator_)
return name, operator_, value
def get_filter(columns: Dict[str, SqlaColumn], filter_: str) -> BinaryExpression:
"""
Build a SQLAlchemy filter.
"""
name, operator_, value = parse_filter(filter_)
if name not in columns:
raise Exception(f"Invalid column name: {name}")
column = columns[name]
comparison = COMPARISONS[operator_]
if column.type.python_type in [datetime.date, datetime.datetime]:
try:
value = str(parse(value))
except Exception as ex:
raise Exception(f"Invalid date or datetime value: {value}") from ex
else:
try:
value = ast.literal_eval(value)
except Exception as ex:
raise Exception(f"Invalid value: {value}") from ex
return comparison(column, value)
def get_dimensions_from_filters(filters: List[str]) -> Set[str]:
"""
Extract dimensions from filters passed to the metric API.
"""
return {parse_filter(filter_)[0] for filter_ in filters}
def get_query_for_node( # pylint: disable=too-many-locals
session: Session,
node: Node,
groupbys: List[str],
filters: List[str],
database_id: Optional[int] = None,
) -> QueryCreate:
"""
Return a DJ QueryCreate object from a given node.
"""
# check that groupbys and filters are valid dimensions
requested_dimensions = set(groupbys) | get_dimensions_from_filters(filters)
valid_dimensions = set(get_dimensions(node))
if not requested_dimensions <= valid_dimensions:
invalid = sorted(requested_dimensions - valid_dimensions)
plural = "s" if len(invalid) > 1 else ""
raise Exception(f"Invalid dimension{plural}: {', '.join(invalid)}")
# which columns are needed from the parents; this is used to determine the database
# where the query will run
referenced_columns = get_referenced_columns_from_sql(node.expression, node.parents)
# extract all referenced dimensions so we can join the node with them
dimensions: Dict[str, Node] = {}
for dimension in requested_dimensions:
name, column = dimension.rsplit(".", 1)
if (
name not in {parent.name for parent in node.parents}
and name not in dimensions
):
dimensions[name] = session.exec(select(Node).where(Node.name == name)).one()
referenced_columns[name].add(column)
# find database
nodes = [node]
nodes.extend(dimensions.values())
database = get_database_for_nodes(session, nodes, referenced_columns, database_id)
# base query
node_select = get_select_for_node(node, database)
source = node_select.froms[0]
# join with dimensions
for dimension in dimensions.values():
subquery = get_select_for_node(
dimension,
database,
referenced_columns[dimension.name],
).alias(dimension.name)
condition = find_on_clause(node, source, dimension, subquery)
node_select = node_select.select_from(source.join(subquery, condition))
columns = {
f"{column.table.name}.{column.name}": column
for from_ in node_select.froms
for column in from_.columns
}
# filter
node_select = node_select.filter(
*[get_filter(columns, filter_) for filter_ in filters]
)
# groupby
node_select = node_select.group_by(*[columns[groupby] for groupby in groupbys])
# add groupbys to projection as well
for groupby in groupbys:
node_select.append_column(columns[groupby])
dialect = make_url(database.URI).get_dialect()
sql = str(
node_select.compile(dialect=dialect(), compile_kwargs={"literal_binds": True}),
)
return QueryCreate(database_id=database.id, submitted_query=sql)
def find_on_clause(
node: Node,
node_select: Select,
dimension: Node,
subquery: Select,
) -> ClauseElement:
"""
Return the on clause for a node/dimension selects.
"""
for parent in node.parents:
for column in parent.columns:
if column.dimension == dimension:
dimension_column = column.dimension_column or DEFAULT_DIMENSION_COLUMN
return (
node_select.columns[column.name] # type: ignore
== subquery.columns[dimension_column] # type: ignore
)
raise Exception(f"Node {node.name} has no columns with dimension {dimension.name}")
# pylint: disable=too-many-branches, too-many-locals, too-many-statements
def get_query_for_sql(sql: str) -> QueryCreate:
"""
Return a query given a SQL expression querying the repo.
Eg:
SELECT
"core.users.gender", "core.num_comments"
FROM metrics
WHERE "core.comments.user_id" > 1
GROUP BY
"core.users.gender"
This works by converting metrics (``core.num_comments``) into their selection
definition (``COUNT(*)``), updating the sources to include the metrics' parents
(including joining with dimensions), and updating column references in the
``WHERE``, ``GROUP BY``, etc.
"""
session = next(get_session())
tree = parse_sql(sql, dialect="ansi")
query_select = tree[0]["Query"]["body"]["Select"]
# fetch all metric and dimension nodes
nodes = {node.name: node for node in session.exec( | select(Node) | sqlmodel.select |
"""
Functions for building queries, from nodes or SQL.
"""
import ast
import datetime
import operator
import re
from typing import Any, Callable, Dict, List, Literal, Optional, Set, Tuple, cast
from dateutil.parser import parse
from sqlalchemy.engine.url import make_url
from sqlalchemy.schema import Column as SqlaColumn
from sqlalchemy.sql.elements import BinaryExpression
from sqlalchemy.sql.expression import ClauseElement
from sqlmodel import Session, select
from sqloxide import parse_sql
from datajunction.constants import DEFAULT_DIMENSION_COLUMN
from datajunction.errors import DJError, DJInvalidInputException, ErrorCode
from datajunction.models.node import Node, NodeType
from datajunction.models.query import QueryCreate
from datajunction.sql.dag import (
get_database_for_nodes,
get_dimensions,
get_referenced_columns_from_sql,
get_referenced_columns_from_tree,
)
from datajunction.sql.parse import (
find_nodes_by_key,
find_nodes_by_key_with_parent,
get_expression_from_projection,
)
from datajunction.sql.transpile import get_query, get_select_for_node
from datajunction.typing import (
Expression,
Identifier,
Join,
Projection,
Relation,
Select,
)
from datajunction.utils import get_session
FILTER_RE = re.compile(r"([\w\./_]+)(<=|<|>=|>|!=|=)(.+)")
FilterOperator = Literal[">", ">=", "<", "<=", "=", "!="]
COMPARISONS: Dict[FilterOperator, Callable[[Any, Any], bool]] = {
">": operator.gt,
">=": operator.ge,
"<": operator.lt,
"<=": operator.le,
"=": operator.eq,
"!=": operator.ne,
}
def parse_filter(filter_: str) -> Tuple[str, FilterOperator, str]:
"""
Parse a filter into name, op, value.
"""
match = FILTER_RE.match(filter_)
if not match:
raise DJInvalidInputException(
message=f'The filter "{filter_}" is invalid',
errors=[
DJError(
code=ErrorCode.INVALID_FILTER_PATTERN,
message=(
f'The filter "{filter_}" is not a valid filter. Filters should '
"consist of a dimension name, follow by a valid operator "
"(<=|<|>=|>|!=|=), followed by a value. If the value is a "
"string or date/time it should be enclosed in single quotes."
),
debug={"context": {"filter": filter_}},
),
],
)
name, operator_, value = match.groups()
operator_ = cast(FilterOperator, operator_)
return name, operator_, value
def get_filter(columns: Dict[str, SqlaColumn], filter_: str) -> BinaryExpression:
"""
Build a SQLAlchemy filter.
"""
name, operator_, value = parse_filter(filter_)
if name not in columns:
raise Exception(f"Invalid column name: {name}")
column = columns[name]
comparison = COMPARISONS[operator_]
if column.type.python_type in [datetime.date, datetime.datetime]:
try:
value = str(parse(value))
except Exception as ex:
raise Exception(f"Invalid date or datetime value: {value}") from ex
else:
try:
value = ast.literal_eval(value)
except Exception as ex:
raise Exception(f"Invalid value: {value}") from ex
return comparison(column, value)
def get_dimensions_from_filters(filters: List[str]) -> Set[str]:
"""
Extract dimensions from filters passed to the metric API.
"""
return {parse_filter(filter_)[0] for filter_ in filters}
def get_query_for_node( # pylint: disable=too-many-locals
session: Session,
node: Node,
groupbys: List[str],
filters: List[str],
database_id: Optional[int] = None,
) -> QueryCreate:
"""
Return a DJ QueryCreate object from a given node.
"""
# check that groupbys and filters are valid dimensions
requested_dimensions = set(groupbys) | get_dimensions_from_filters(filters)
valid_dimensions = set(get_dimensions(node))
if not requested_dimensions <= valid_dimensions:
invalid = sorted(requested_dimensions - valid_dimensions)
plural = "s" if len(invalid) > 1 else ""
raise Exception(f"Invalid dimension{plural}: {', '.join(invalid)}")
# which columns are needed from the parents; this is used to determine the database
# where the query will run
referenced_columns = get_referenced_columns_from_sql(node.expression, node.parents)
# extract all referenced dimensions so we can join the node with them
dimensions: Dict[str, Node] = {}
for dimension in requested_dimensions:
name, column = dimension.rsplit(".", 1)
if (
name not in {parent.name for parent in node.parents}
and name not in dimensions
):
dimensions[name] = session.exec( | select(Node) | sqlmodel.select |
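# Hedged illustration (the values are made up) of the filter grammar that
# parse_filter accepts: a dimension name, one of (<=|<|>=|>|!=|=), then a value.
name, operator_, value = parse_filter("core.users.age>=21")
assert (name, operator_, value) == ("core.users.age", ">=", "21")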
from collections import deque
from time import sleep
import pytest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import WebDriverException
from sqlmodel import Session, select
from youtube.db import engine
from youtube.models import YouTube
@pytest.fixture(scope="session")
def driver():
driver = webdriver.Chrome()
try:
driver.get("http://localhost:8000/")
yield driver
except WebDriverException:
raise RuntimeError("Cannot get to localhost:8000, did you start FastAPI?")
finally:
driver.quit()
@pytest.fixture(scope="session")
def scroll_to_end(driver):
cache_size = 5
num_rows = deque(maxlen=cache_size)
i = 0
while True:
last_element = driver.find_elements_by_class_name("mui--text-subhead")[-1]
actions = webdriver.ActionChains(driver)
actions.move_to_element(last_element).perform()
i += 1
num_rows.append(len(driver.find_elements_by_tag_name("tr")))
if i > cache_size and num_rows.count(num_rows[-1]) == len(num_rows):
print("num rows stable, seems I hit the end of infinite scroll")
break
def test_number_of_rows_on_page(session, driver, scroll_to_end):
with | Session(engine) | sqlmodel.Session |
from collections import deque
from time import sleep
import pytest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import WebDriverException
from sqlmodel import Session, select
from youtube.db import engine
from youtube.models import YouTube
@pytest.fixture(scope="session")
def driver():
driver = webdriver.Chrome()
try:
driver.get("http://localhost:8000/")
yield driver
except WebDriverException:
raise RuntimeError("Cannot get to localhost:8000, did you start FastAPI?")
finally:
driver.quit()
@pytest.fixture(scope="session")
def scroll_to_end(driver):
cache_size = 5
num_rows = deque(maxlen=cache_size)
i = 0
while True:
last_element = driver.find_elements_by_class_name("mui--text-subhead")[-1]
actions = webdriver.ActionChains(driver)
actions.move_to_element(last_element).perform()
i += 1
num_rows.append(len(driver.find_elements_by_tag_name("tr")))
if i > cache_size and num_rows.count(num_rows[-1]) == len(num_rows):
print("num rows stable, seems I hit the end of infinite scroll")
break
def test_number_of_rows_on_page(session, driver, scroll_to_end):
with Session(engine) as session:
num_row_in_db_table = len(session.exec( | select(YouTube) | sqlmodel.select |
from typing import List
from uuid import UUID
import inject
from sqlmodel import Session, select
from src.core.events import EventDescription
from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError
from src.core.models import Client, Context, CreateClient, QueryClient, UpdateClient
from src.core.services import Streamer
@inject.params(streamer=Streamer)
def create(session: Session, schema: CreateClient, context: Context, streamer: Streamer) -> Client:
if session.exec( | select(Client) | sqlmodel.select |
from typing import List
from uuid import UUID
import inject
from sqlmodel import Session, select
from src.core.events import EventDescription
from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError
from src.core.models import Client, Context, CreateClient, QueryClient, UpdateClient
from src.core.services import Streamer
@inject.params(streamer=Streamer)
def create(session: Session, schema: CreateClient, context: Context, streamer: Streamer) -> Client:
if session.exec(select(Client).where(Client.email == schema.email)).first():
raise DatabaseError("Já existe um cliente cadastrado com o email: %s" % schema.email)
if session.exec( | select(Client) | sqlmodel.select |
from typing import List
from uuid import UUID
import inject
from sqlmodel import Session, select
from src.core.events import EventDescription
from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError
from src.core.models import Client, Context, CreateClient, QueryClient, UpdateClient
from src.core.services import Streamer
@inject.params(streamer=Streamer)
def create(session: Session, schema: CreateClient, context: Context, streamer: Streamer) -> Client:
if session.exec(select(Client).where(Client.email == schema.email)).first():
raise DatabaseError("Já existe um cliente cadastrado com o email: %s" % schema.email)
if session.exec(select(Client).where(Client.phone == schema.phone)).first():
raise DatabaseError("Já existe um cliente cadastrado com o telefone: %s" % schema.phone)
client = Client(**schema.dict(), owner_id=context.user_id)
session.add(client)
session.commit()
streamer.send_event(EventDescription.CREATE_USER, context=context, client=client.dict())
return client
def get_all(session: Session, query_schema: QueryClient, context: Context) -> List[Client]:
args = []
if not context.user_is_super_user:
args.append(Client.owner_id == context.user_id)
return session.exec( | select(Client) | sqlmodel.select |
from typing import List
from uuid import UUID
import inject
from sqlmodel import Session, select
from src.core.events import EventDescription
from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError
from src.core.models import Client, Context, CreateClient, QueryClient, UpdateClient
from src.core.services import Streamer
@inject.params(streamer=Streamer)
def create(session: Session, schema: CreateClient, context: Context, streamer: Streamer) -> Client:
if session.exec(select(Client).where(Client.email == schema.email)).first():
raise DatabaseError("Já existe um cliente cadastrado com o email: %s" % schema.email)
if session.exec(select(Client).where(Client.phone == schema.phone)).first():
raise DatabaseError("Já existe um cliente cadastrado com o telefone: %s" % schema.phone)
client = Client(**schema.dict(), owner_id=context.user_id)
session.add(client)
session.commit()
streamer.send_event(EventDescription.CREATE_USER, context=context, client=client.dict())
return client
def get_all(session: Session, query_schema: QueryClient, context: Context) -> List[Client]:
args = []
if not context.user_is_super_user:
args.append(Client.owner_id == context.user_id)
return session.exec(select(Client).where(*args)).all()
def get_by_id(session: Session, client_id: UUID, context: Context) -> Client:
client = session.exec( | select(Client) | sqlmodel.select |
from typing import List
from uuid import UUID
import inject
from sqlmodel import Session, select
from src.core.events import EventDescription
from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError
from src.core.models import Client, Context, CreateClient, QueryClient, UpdateClient
from src.core.services import Streamer
@inject.params(streamer=Streamer)
def create(session: Session, schema: CreateClient, context: Context, streamer: Streamer) -> Client:
if session.exec(select(Client).where(Client.email == schema.email)).first():
raise DatabaseError("Já existe um cliente cadastrado com o email: %s" % schema.email)
if session.exec(select(Client).where(Client.phone == schema.phone)).first():
raise DatabaseError("Já existe um cliente cadastrado com o telefone: %s" % schema.phone)
client = Client(**schema.dict(), owner_id=context.user_id)
session.add(client)
session.commit()
streamer.send_event(EventDescription.CREATE_USER, context=context, client=client.dict())
return client
def get_all(session: Session, query_schema: QueryClient, context: Context) -> List[Client]:
args = []
if not context.user_is_super_user:
args.append(Client.owner_id == context.user_id)
return session.exec(select(Client).where(*args)).all()
def get_by_id(session: Session, client_id: UUID, context: Context) -> Client:
client = session.exec(select(Client).where(Client.id == client_id)).first()
if not client:
raise NotFoundError(f"Não foi possível localizar o Client com ID: {client_id}")
if not context.user_is_super_user and client.owner_id != context.user_id:
raise NotAuthorizedError(f"Você não possui permissão para consultar os dados do cliente com ID {client_id}!")
return client
@inject.params(streamer=Streamer)
def delete(session: Session, client_id: UUID, context: Context, streamer: Streamer) -> Client:
client = session.exec( | select(Client) | sqlmodel.select |
from typing import List
from uuid import UUID
import inject
from sqlmodel import Session, select
from src.core.events import EventDescription
from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError
from src.core.models import Client, Context, CreateClient, QueryClient, UpdateClient
from src.core.services import Streamer
@inject.params(streamer=Streamer)
def create(session: Session, schema: CreateClient, context: Context, streamer: Streamer) -> Client:
if session.exec(select(Client).where(Client.email == schema.email)).first():
raise DatabaseError("Já existe um cliente cadastrado com o email: %s" % schema.email)
if session.exec(select(Client).where(Client.phone == schema.phone)).first():
raise DatabaseError("Já existe um cliente cadastrado com o telefone: %s" % schema.phone)
client = Client(**schema.dict(), owner_id=context.user_id)
session.add(client)
session.commit()
streamer.send_event(EventDescription.CREATE_USER, context=context, client=client.dict())
return client
def get_all(session: Session, query_schema: QueryClient, context: Context) -> List[Client]:
args = []
if not context.user_is_super_user:
args.append(Client.owner_id == context.user_id)
return session.exec(select(Client).where(*args)).all()
def get_by_id(session: Session, client_id: UUID, context: Context) -> Client:
client = session.exec(select(Client).where(Client.id == client_id)).first()
if not client:
raise NotFoundError(f"Não foi possível localizar o Client com ID: {client_id}")
if not context.user_is_super_user and client.owner_id != context.user_id:
raise NotAuthorizedError(f"Você não possui permissão para consultar os dados do cliente com ID {client_id}!")
return client
@inject.params(streamer=Streamer)
def delete(session: Session, client_id: UUID, context: Context, streamer: Streamer) -> Client:
client = session.exec(select(Client).where(Client.id == client_id)).first()
if not client:
raise NotFoundError(f"Não foi possível localizar o Cliente com ID: {client_id}")
if not context.user_is_super_user and client.owner_id != context.user_id:
raise NotAuthorizedError(f"Você não possui permissão para excluir o Cliente com ID: {client_id}")
session.delete(client)
session.commit()
streamer.send_event(description=EventDescription.DELETE_CLIENT, context=context, client=client.dict())
return client
@inject.params(streamer=Streamer)
def update(session: Session, data: UpdateClient, context: Context, streamer: Streamer) -> Client:
client = session.exec( | select(Client) | sqlmodel.select |
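A minimal, self-contained sketch of the conditional-filter pattern used in get_all above: criteria are collected in a list and splatted into .where(), so a superuser gets an unfiltered query. The DemoClient model, the in-memory engine, and the sample data are illustrative only, not part of the original service.
from typing import List, Optional
from uuid import UUID, uuid4

from sqlmodel import Field, Session, SQLModel, create_engine, select


class DemoClient(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    owner_id: UUID
    email: str


engine = create_engine("sqlite://")  # in-memory database, sketch only
SQLModel.metadata.create_all(engine)


def list_clients(session: Session, user_id: UUID, is_super_user: bool) -> List[DemoClient]:
    # Same shape as get_all: an empty criteria list means "return everything".
    args = []
    if not is_super_user:
        args.append(DemoClient.owner_id == user_id)
    return session.exec(select(DemoClient).where(*args)).all()


with Session(engine) as session:
    owner = uuid4()
    session.add(DemoClient(owner_id=owner, email="jane@example.com"))
    session.commit()
    print(list_clients(session, owner, is_super_user=False))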
from datetime import datetime, date, time
from typing import Optional, List
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryAppointmentOr(SQLModel, table=True):
id: Optional[int] = | Field(default=None, primary_key=True) | sqlmodel.Field |
from datetime import datetime, date, time
from typing import Optional, List
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryAppointmentOr(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id: int
appointment_or_id: int
state_from: str
state_to: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class HistoryAppointmentOrMap(SQLModel, table=True):
id: Optional[int] = | Field(default=None, primary_key=True) | sqlmodel.Field |
from datetime import datetime, date, time
from typing import Optional, List
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryAppointmentOr(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id: int
appointment_or_id: int
state_from: str
state_to: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class HistoryAppointmentOrMap(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
appointment_or_id: int
procedure_id: int
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class AppointmentOr(SQLModel, table=True):
id: Optional[int] = | Field(default=None, primary_key=True) | sqlmodel.Field |
from datetime import datetime, date, time
from typing import Optional, List
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryAppointmentOr(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id: int
appointment_or_id: int
state_from: str
state_to: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class HistoryAppointmentOrMap(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
appointment_or_id: int
procedure_id: int
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class AppointmentOr(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
state: str
date_procedure: date
date_admission: date
date_confirmation: date
time_start: time
time_end: time
disease: str
detail: str
is_special_tool_required: bool
is_icu_reserved: bool
is_date_recorded: bool
tool_note: str
icu_note: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class AppointmentOrReschedule(SQLModel, table=True):
id: Optional[int] = | Field(default=None, primary_key=True) | sqlmodel.Field |
from datetime import datetime, date, time
from typing import Optional, List
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryAppointmentOr(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id: int
appointment_or_id: int
state_from: str
state_to: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class HistoryAppointmentOrMap(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
appointment_or_id: int
procedure_id: int
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class AppointmentOr(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
state: str
date_procedure: date
date_admission: date
date_confirmation: date
time_start: time
time_end: time
disease: str
detail: str
is_special_tool_required: bool
is_icu_reserved: bool
is_date_recorded: bool
tool_note: str
icu_note: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class AppointmentOrReschedule(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
appointment_or_id: int
date_from: date
date_to: date
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class AppointmentOrDoctorMap(SQLModel, table=True):
id: Optional[int] = | Field(default=None, primary_key=True) | sqlmodel.Field |
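A hedged sketch of a read endpoint the router above could expose, assuming get_session yields an AsyncSession as the imports suggest; the route path and response shape are illustrative, not taken from the original file.
@router.get("/appointment_or")
async def list_appointment_or(session: AsyncSession = Depends(get_session)) -> List[AppointmentOr]:
    # select() here is the sqlalchemy variant imported above; scalars() unwraps the rows.
    result = await session.execute(select(AppointmentOr))
    return result.scalars().all()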
from sqlmodel import SQLModel, create_engine
from aot_quotes.common.db.quotes import Quotes
engine = | create_engine("sqlite:///database.db", echo=True) | sqlmodel.create_engine |
from sqlmodel import SQLModel, create_engine
from aot_quotes.common.db.quotes import Quotes
engine = create_engine("sqlite:///database.db", echo=True)
def migrate():
| SQLModel.metadata.create_all(engine) | sqlmodel.SQLModel.metadata.create_all |
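A small sketch of how migrate() above might be invoked as a script entry point; importing Quotes before create_all() is what registers its table on SQLModel.metadata, so that import is needed even though the name looks unused.
if __name__ == "__main__":
    migrate()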
"""
Models for columns.
"""
from typing import TYPE_CHECKING, Optional, TypedDict
from sqlalchemy.sql.schema import Column as SqlaColumn
from sqlalchemy.types import Enum
from sqlmodel import Field, Relationship, SQLModel
from datajunction.typing import ColumnType
if TYPE_CHECKING:
from datajunction.models.node import Node
class ColumnYAML(TypedDict, total=False):
"""
Schema of a column in the YAML file.
"""
type: str
dimension: str
class Column(SQLModel, table=True): # type: ignore
"""
A column.
Columns can be physical (associated with ``Table`` objects) or abstract (associated
with ``Node`` objects).
"""
id: Optional[int] = | Field(default=None, primary_key=True) | sqlmodel.Field |
"""
Models for columns.
"""
from typing import TYPE_CHECKING, Optional, TypedDict
from sqlalchemy.sql.schema import Column as SqlaColumn
from sqlalchemy.types import Enum
from sqlmodel import Field, Relationship, SQLModel
from datajunction.typing import ColumnType
if TYPE_CHECKING:
from datajunction.models.node import Node
class ColumnYAML(TypedDict, total=False):
"""
Schema of a column in the YAML file.
"""
type: str
dimension: str
class Column(SQLModel, table=True): # type: ignore
"""
A column.
Columns can be physical (associated with ``Table`` objects) or abstract (associated
with ``Node`` objects).
"""
id: Optional[int] = Field(default=None, primary_key=True)
name: str
type: ColumnType = Field(sa_column=SqlaColumn(Enum(ColumnType)))
dimension_id: Optional[int] = | Field(default=None, foreign_key="node.id") | sqlmodel.Field |
"""
Models for columns.
"""
from typing import TYPE_CHECKING, Optional, TypedDict
from sqlalchemy.sql.schema import Column as SqlaColumn
from sqlalchemy.types import Enum
from sqlmodel import Field, Relationship, SQLModel
from datajunction.typing import ColumnType
if TYPE_CHECKING:
from datajunction.models.node import Node
class ColumnYAML(TypedDict, total=False):
"""
Schema of a column in the YAML file.
"""
type: str
dimension: str
class Column(SQLModel, table=True): # type: ignore
"""
A column.
Columns can be physical (associated with ``Table`` objects) or abstract (associated
with ``Node`` objects).
"""
id: Optional[int] = Field(default=None, primary_key=True)
name: str
type: ColumnType = Field(sa_column=SqlaColumn(Enum(ColumnType)))
dimension_id: Optional[int] = Field(default=None, foreign_key="node.id")
dimension: "Node" = | Relationship() | sqlmodel.Relationship |
import os
from sqlmodel import SQLModel, create_engine
from .base_variables import APPNAME, DEBUG
from utils import print_warning
import sys
if DEBUG:
PG_HOST: str = os.getenv("PGHOST", "localhost")
PG_USER: str = os.getenv("PGUSER", "postgres")
PG_PASSWORD: str = os.getenv("PGPASSWORD", "<PASSWORD>")
PG_PORT: str = os.getenv("PGPORT", "5432")
PG_DATABASE:str = os.getenv("PGDATABASE", APPNAME)
DATABASE_URL:str = os.getenv("DATABASE_URL", f"postgresql://{PG_USER}:{PG_PASSWORD}@{PG_HOST}:{PG_PORT}/{PG_DATABASE}")
else:
DATABASE_URL = os.getenv("DATABASE_URL", "")
if not DATABASE_URL:
print_warning(
(
"You are not setting the DATABASE_URL in your environment!",
)
)
sys.exit("[ERROR] Default DATABASE_URL is not set\n")
print(DATABASE_URL)
dbengine = | create_engine(DATABASE_URL) | sqlmodel.create_engine |
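A hedged sketch of how dbengine might be consumed by the rest of the application; the health-check query is illustrative, only Session and the engine come from this module.
from sqlalchemy import text
from sqlmodel import Session


def check_connection() -> bool:
    # Opens a short-lived session against dbengine and runs a trivial query.
    with Session(dbengine) as session:
        session.execute(text("SELECT 1"))
    return True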
from typing import Optional
from sqlmodel import Field, SQLModel
from datetime import datetime, date
class Rate(SQLModel, table=True):
"""Create an SQLModel for rates"""
id: Optional[int] = | Field(default=None, primary_key=True) | sqlmodel.Field |
from typing import Optional
from sqlmodel import Field, SQLModel
from datetime import datetime, date
class Rate(SQLModel, table=True):
"""Create an SQLModel for rates"""
id: Optional[int] = Field(default=None, primary_key=True)
user_id: int = | Field(foreign_key="app_db.appuser.id") | sqlmodel.Field |
from typing import Optional
from sqlmodel import Field, SQLModel
from datetime import datetime, date
class Rate(SQLModel, table=True):
"""Create an SQLModel for rates"""
id: Optional[int] = Field(default=None, primary_key=True)
user_id: int = Field(foreign_key="app_db.appuser.id")
client_id: int = | Field(foreign_key="app_db.client.id") | sqlmodel.Field |
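The foreign keys above point at tables inside an app_db schema; a hedged sketch of how such a model is commonly pinned to the same schema via __table_args__ (this detail is not visible in the truncated snippet and may differ in the real project).
class RateInSchema(SQLModel, table=True):
    __table_args__ = {"schema": "app_db"}  # assumption: model lives alongside the referenced tables

    id: Optional[int] = Field(default=None, primary_key=True)
    user_id: int = Field(foreign_key="app_db.appuser.id")
    client_id: int = Field(foreign_key="app_db.client.id")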
"""user latest record
Revision ID: 7c2a518ed636
Revises: fe2df95ee61a
Create Date: 2021-11-27 15:37:54.561822
"""
import sqlalchemy as sa
import sqlmodel
import sqlmodel.sql.sqltypes
from alembic import op
# revision identifiers, used by Alembic.
revision = "7c2a518ed636"
down_revision = "fe2df95ee61a"
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"user_latest_records",
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column("user_id", | sqlmodel.sql.sqltypes.GUID() | sqlmodel.sql.sqltypes.GUID |
"""user latest record
Revision ID: 7c2a518ed636
Revises: fe2df95ee61a
Create Date: 2021-11-27 15:37:54.561822
"""
import sqlalchemy as sa
import sqlmodel
import sqlmodel.sql.sqltypes
from alembic import op
# revision identifiers, used by Alembic.
revision = "7c2a518ed636"
down_revision = "fe2df95ee61a"
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"user_latest_records",
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("problem_id", | sqlmodel.sql.sqltypes.GUID() | sqlmodel.sql.sqltypes.GUID |
"""user latest record
Revision ID: 7c2a518ed636
Revises: fe2df95ee61a
Create Date: 2021-11-27 15:37:54.561822
"""
import sqlalchemy as sa
import sqlmodel
import sqlmodel.sql.sqltypes
from alembic import op
# revision identifiers, used by Alembic.
revision = "7c2a518ed636"
down_revision = "fe2df95ee61a"
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"user_latest_records",
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("problem_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("problem_set_id", | sqlmodel.sql.sqltypes.GUID() | sqlmodel.sql.sqltypes.GUID |
"""user latest record
Revision ID: 7c2a518ed636
Revises: fe2df95ee61a
Create Date: 2021-11-27 15:37:54.561822
"""
import sqlalchemy as sa
import sqlmodel
import sqlmodel.sql.sqltypes
from alembic import op
# revision identifiers, used by Alembic.
revision = "7c2a518ed636"
down_revision = "fe2df95ee61a"
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"user_latest_records",
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("problem_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("problem_set_id", sqlmodel.sql.sqltypes.GUID(), nullable=True),
sa.Column("record_id", | sqlmodel.sql.sqltypes.GUID() | sqlmodel.sql.sqltypes.GUID |
"""user latest record
Revision ID: 7c2a518ed636
Revises: fe2df95ee61a
Create Date: 2021-11-27 15:37:54.561822
"""
import sqlalchemy as sa
import sqlmodel
import sqlmodel.sql.sqltypes
from alembic import op
# revision identifiers, used by Alembic.
revision = "7c2a518ed636"
down_revision = "fe2df95ee61a"
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"user_latest_records",
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("problem_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("problem_set_id", sqlmodel.sql.sqltypes.GUID(), nullable=True),
sa.Column("record_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("id", | sqlmodel.sql.sqltypes.GUID() | sqlmodel.sql.sqltypes.GUID |
"""user latest record
Revision ID: 7c2a518ed636
Revises: fe2df95ee61a
Create Date: 2021-11-27 15:37:54.561822
"""
import sqlalchemy as sa
import sqlmodel
import sqlmodel.sql.sqltypes
from alembic import op
# revision identifiers, used by Alembic.
revision = "7c2a518ed636"
down_revision = "fe2df95ee61a"
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"user_latest_records",
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("problem_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("problem_set_id", sqlmodel.sql.sqltypes.GUID(), nullable=True),
sa.Column("record_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.ForeignKeyConstraint(["problem_id"], ["problems.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(
["problem_set_id"], ["problem_sets.id"], ondelete="CASCADE"
),
sa.ForeignKeyConstraint(["record_id"], ["records.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("user_id", "problem_id", "problem_set_id", "record_id"),
)
op.create_index(
op.f("ix_user_latest_records_created_at"),
"user_latest_records",
["created_at"],
unique=False,
)
op.create_index(
op.f("ix_user_latest_records_id"), "user_latest_records", ["id"], unique=False
)
op.create_index(
op.f("ix_user_latest_records_updated_at"),
"user_latest_records",
["updated_at"],
unique=False,
)
op.add_column(
"problem_configs",
sa.Column(
"commit_message",
| sqlmodel.sql.sqltypes.AutoString() | sqlmodel.sql.sqltypes.AutoString |
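A hedged sketch of the downgrade() that would mirror the upgrade above; Alembic normally autogenerates the inverse, so the index and column names here simply reverse the operations shown rather than being copied from the real revision file.
def downgrade() -> None:
    op.drop_column("problem_configs", "commit_message")
    op.drop_index(op.f("ix_user_latest_records_updated_at"), table_name="user_latest_records")
    op.drop_index(op.f("ix_user_latest_records_id"), table_name="user_latest_records")
    op.drop_index(op.f("ix_user_latest_records_created_at"), table_name="user_latest_records")
    op.drop_table("user_latest_records")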