Use Alembic with GeoAlchemy 2

The Alembic package is a lightweight database migration tool which is able to automatically detect the table column types.

Interactions between Alembic and GeoAlchemy 2

Interactions between some features of Alembic and GeoAlchemy 2 may lead to errors in migration scripts, especially when using the --autogenerate feature of Alembic with the spatial_index=True feature of GeoAlchemy 2. In this case, the following errors occur:

  1. the migration script misses the relevant imports from geoalchemy2.
  2. the migration script will create the indexes of the spatial columns after the table is created, but these indexes are already automatically created during table creation, which will lead to an error.

For example, suppose the following table is defined:

class Lake(Base):
    __tablename__ = 'lake'
    id = Column(Integer, primary_key=True)
    geom = Column(
        Geometry(
            geometry_type='LINESTRING',
            srid=4326,
            spatial_index=True,
        )
    )

Then the command alembic revision --autogenerate -m "Create new table" will create the following migration script:

"""Create new table

Revision ID: 01b69e67a408
Revises: 2371af7aed3f
Create Date: 2022-01-27 15:53:05.268929

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "01b69e67a408"
down_revision = "2371af7aed3f"
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "lake",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column(
            "geom",
            geoalchemy2.types.Geometry(
                geometry_type="LINESTRING",
                srid=4326,
                from_text="ST_GeomFromEWKT",
                name="geometry",
            ),
            nullable=True,
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        "idx_lake_geom",
        "lake",
        ["geom"],
        unique=False,
        postgresql_using="gist",
        postgresql_ops={},
    )
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(
        "idx_lake_geom",
        table_name="lake",
        postgresql_using="gist",
        postgresql_ops={},
    )
    op.drop_table("lake")
    # ### end Alembic commands ###

In this case, we have to make the following changes for it to work:

  1. add the missing import statement: from geoalchemy2 import Geometry.
  2. remove the create_index statement in the upgrade() function.
  3. remove the drop_index statement in the downgrade() function.

Helpers

In order to make the use of Alembic easier, a few helpers are provided in geoalchemy2.alembic_helpers. These helpers can be used in the env.py file used by Alembic, like in the following example:

# ...
from geoalchemy2.alembic_helpers import include_object
from geoalchemy2.alembic_helpers import render_item
# ...

def run_migrations_offline():
    # ...
    context.configure(
        # ...
        render_item=render_item,
        include_object=include_object,
    )
    # ...


def run_migrations_online():
    # ...
    context.configure(
        # ...
        render_item=render_item,
        include_object=include_object,
    )
    # ...

After running the alembic command, the migration script will be properly generated and should not need to be manually edited.

Dealing with custom types

With SQLAlchemy, users are able to define custom types, as shown in Automatically use a function at insert or select. In this case, you can refer to the dedicated page of Alembic’s documentation for the details.

A simple solution for this case is to create a new render_item function to add specific imports for these custom types. For example, if your custom type is called TheCustomType and is defined in my_package.custom_types, you just have to edit the env.py file like the following:

# ...
from geoalchemy2.alembic_helpers import include_object
from geoalchemy2.alembic_helpers import render_item as spatial_render_item
from my_package.custom_types import TheCustomType
# ...


def render_item(obj_type, obj, autogen_context):
    """Apply custom rendering for selected items."""
    spatial_type = spatial_render_item(obj_type, obj, autogen_context)
    if spatial_type:
        return spatial_type

    # For the custom type
    if obj_type == 'type' and isinstance(obj, TheCustomType):
        import_name = obj.__class__.__name__
        autogen_context.imports.add(f"from my_package.custom_types import {import_name}")
        return "%r" % obj

    # default rendering for other objects
    return False



def run_migrations_offline():
    # ...
    context.configure(
        # ...
        render_item=render_item,
        include_object=include_object,
    )
    # ...


def run_migrations_online():
    # ...
    context.configure(
        # ...
        render_item=render_item,
        include_object=include_object,
    )
    # ...

Then the proper imports will be automatically added in the migration scripts.

Add / Drop columns

Some dialects (like SQLite) require some specific management to alter columns of a table. In this case, other dedicated helpers are provided to handle this. For example, if one wants to add and drop columns in a SQLite database, the env.py file should look like the following:


from alembic.autogenerate import rewriter

writer = rewriter.Rewriter()

@writer.rewrites(ops.AddColumnOp)
def add_geo_column(context, revision, op):
    """This function replaces the default AddColumnOp by a geospatial-specific one."""
    col_type = op.column.type
    if isinstance(col_type, TypeDecorator):
        dialect = context.bind().dialect
        col_type = col_type.load_dialect_impl(dialect)
    if isinstance(col_type, (Geometry, Geography, Raster)):
        new_op = AddGeospatialColumn(op.table_name, op.column, op.schema)
    else:
        new_op = op
    return new_op


@writer.rewrites(ops.DropColumnOp)
def drop_geo_column(context, revision, op):
    """This function replaces the default DropColumnOp by a geospatial-specific one."""
    col_type = op.to_column().type
    if isinstance(col_type, TypeDecorator):
        dialect = context.bind.dialect
        col_type = col_type.load_dialect_impl(dialect)
    if isinstance(col_type, (Geometry, Geography, Raster)):
        new_op = DropGeospatialColumn(op.table_name, op.column_name, op.schema)
    else:
        new_op = op
    return new_op


def load_spatialite(dbapi_conn, connection_record):
    """Load SpatiaLite extension in SQLite DB."""
    dbapi_conn.enable_load_extension(True)
    dbapi_conn.load_extension(os.environ['SPATIALITE_LIBRARY_PATH'])
    dbapi_conn.enable_load_extension(False)
    dbapi_conn.execute('SELECT InitSpatialMetaData()')


def run_migrations_offline():
    # ...
    context.configure(
        # ...
        process_revision_directives=writer,
    )
    # ...


def run_migrations_online():
    # ...
    if connectable.dialect.name == "sqlite":
        # Load the SpatiaLite extension when the engine connects to the DB
        listen(connectable, 'connect', load_spatialite)

    with connectable.connect() as connection:
        # ...
        context.configure(
            # ...
            process_revision_directives=writer,
        )
        # ...