diff --git a/.github/workflows/first_startup.yaml b/.github/workflows/first_startup.yaml index 62f4b2647e5f..896734d4b524 100644 --- a/.github/workflows/first_startup.yaml +++ b/.github/workflows/first_startup.yaml @@ -57,9 +57,6 @@ jobs: yarn-lock-file: 'galaxy root/client/yarn.lock' - name: Install tox run: pip install tox - # Use this job to test the latest migrations - - run: wget -q https://github.com/jmchilton/galaxy-downloads/raw/master/db_gx_rev_0141.sqlite - - run: mv db_gx_rev_0141.sqlite 'galaxy root'/database/universe.sqlite - name: run tests run: tox -e first_startup working-directory: 'galaxy root' diff --git a/.github/workflows/unit-postgres.yaml b/.github/workflows/unit-postgres.yaml new file mode 100644 index 000000000000..6a7d4fd1d165 --- /dev/null +++ b/.github/workflows/unit-postgres.yaml @@ -0,0 +1,56 @@ +name: Unit w/postgres tests +on: + push: + paths-ignore: + - 'client/**' + - 'doc/**' + pull_request: + paths-ignore: + - 'client/**' + - 'doc/**' +env: + GALAXY_TEST_DBURI: 'postgresql://postgres:postgres@localhost:5432/postgres?client_encoding=utf8' # using postgres as the db +concurrency: + group: py-unit-${{ github.ref }} + cancel-in-progress: true +jobs: + test: + name: Test + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ['3.7'] + services: + postgres: + image: postgres:13 + env: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: postgres + ports: + - 5432:5432 + steps: + - uses: actions/checkout@v2 + with: + path: 'galaxy root' + - uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Get full Python version + id: full-python-version + shell: bash + run: echo ::set-output name=version::$(python -c "import sys; print('-'.join(str(v) for v in sys.version_info))") + - name: Cache pip dir + uses: actions/cache@v2 + with: + path: ~/.cache/pip + key: pip-cache-${{ matrix.python-version }}-${{ hashFiles('galaxy root/requirements.txt') }} + - name: Cache galaxy venv + uses: actions/cache@v2 + with: + path: .venv + key: gxy-venv-${{ runner.os }}-${{ steps.full-python-version.outputs.version }}-${{ hashFiles('galaxy root/requirements.txt') }}-unitdb + - name: Run tests + run: ./run_tests.sh -unit test/unit/data/model/migrations/test_migrations.py + working-directory: 'galaxy root' diff --git a/create_db.sh b/create_db.sh index 3c34dd6c3e57..690d0c6f8c89 100755 --- a/create_db.sh +++ b/create_db.sh @@ -1,5 +1,21 @@ #!/bin/sh +####### +# Use this script to verify the state of the Galaxy and Tool Shed Install +# database(s). If the database does not exist or is empty, it will be created +# and initialized. +# (Use create_toolshed_db.sh to create and initialize a new +# Tool Shed database.) +# +# To pass a galaxy config file, use `--galaxy-config` +# +# You may also override the galaxy database url and/or the +# tool shed install database url, as well as the database_template +# and database_encoding configuration options with env vars: +# GALAXY_CONFIG_OVERRIDE_DATABASE_CONNECTION=my-db-url ./create_db.sh +# GALAXY_INSTALL_CONFIG_OVERRIDE_DATABASE_CONNECTION=my-other-db-url ./create_db.sh +####### + cd "$(dirname "$0")" . ./scripts/common_startup_functions.sh diff --git a/create_toolshed_db.sh b/create_toolshed_db.sh new file mode 100755 index 000000000000..3d266a3c7678 --- /dev/null +++ b/create_toolshed_db.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +####### +# Use this script to verify the state of the Tool Shed database. 
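+# Example invocation (the URL is illustrative, and the
+# TOOL_SHED_CONFIG_OVERRIDE_ env var prefix is an assumption):
+#   TOOL_SHED_CONFIG_OVERRIDE_DATABASE_CONNECTION=postgresql://user:pass@localhost/toolshed ./create_toolshed_db.sh
+#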
+# If the database does not exist or is empty, it will be created +# and initialized. +# (For Galaxy and Tool Shed Install databases, use create_db.sh). +####### + +cd "$(dirname "$0")" + +. ./scripts/common_startup_functions.sh + +setup_python + +python ./scripts/create_toolshed_db.py "$@" tool_shed diff --git a/lib/galaxy/app.py b/lib/galaxy/app.py index c53072a05cd2..b085458ecc85 100644 --- a/lib/galaxy/app.py +++ b/lib/galaxy/app.py @@ -50,22 +50,25 @@ WorkflowContentsManager, WorkflowsManager, ) -from galaxy.model import custom_types +from galaxy.model import ( + custom_types, + mapping, +) from galaxy.model.base import SharedModelMapping from galaxy.model.database_heartbeat import DatabaseHeartbeat -from galaxy.model.database_utils import database_exists -from galaxy.model.mapping import ( - GalaxyModelMapping, - init_models_from_config, +from galaxy.model.database_utils import ( + database_exists, + is_one_database, ) -from galaxy.model.migrate.check import create_or_verify_database +from galaxy.model.mapping import GalaxyModelMapping +from galaxy.model.migrations import verify_databases +from galaxy.model.orm.engine_factory import build_engine from galaxy.model.scoped_session import ( galaxy_scoped_session, install_model_scoped_session, ) from galaxy.model.tags import GalaxyTagHandler from galaxy.model.tool_shed_install import mapping as install_mapping -from galaxy.model.tool_shed_install.migrate.check import create_or_verify_database as tsi_create_or_verify_database from galaxy.objectstore import build_object_store_from_config from galaxy.queue_worker import ( GalaxyQueueWorker, @@ -334,52 +337,70 @@ def _configure_tool_shed_registry(self): else: self.tool_shed_registry = tool_shed_registry.Registry() + def _configure_engines(self, db_url, install_db_url, combined_install_database): + trace_logger = getattr(self, "trace_logger", None) + engine = build_engine( + db_url, + self.config.database_engine_options, + self.config.database_query_profiling_proxy, + trace_logger, + self.config.slow_query_log_threshold, + self.config.thread_local_log, + self.config.database_log_query_counts, + ) + install_engine = None + if not combined_install_database: + install_engine = build_engine(install_db_url, self.config.install_database_engine_options) + return engine, install_engine + def _configure_models(self, check_migrate_databases=False, config_file=None): """Preconditions: object_store must be set on self.""" + # TODO this block doesn't seem to belong in this method + if getattr(self.config, "max_metadata_value_size", None): + custom_types.MAX_METADATA_VALUE_SIZE = self.config.max_metadata_value_size + db_url = self.config.database_connection install_db_url = self.config.install_database_connection - # TODO: Consider more aggressive check here that this is not the same - # database file under the hood. 
-        combined_install_database = not (install_db_url and install_db_url != db_url)
-        install_db_url = install_db_url or db_url
-        install_database_options = (
-            self.config.database_engine_options
-            if combined_install_database
-            else self.config.install_database_engine_options
-        )
+        combined_install_database = is_one_database(db_url, install_db_url)
+        engine, install_engine = self._configure_engines(db_url, install_db_url, combined_install_database)
 
         if self.config.database_wait:
             self._wait_for_database(db_url)
 
-        if getattr(self.config, "max_metadata_value_size", None):
-            custom_types.MAX_METADATA_VALUE_SIZE = self.config.max_metadata_value_size
-
         if check_migrate_databases:
-            # Initialize database / check for appropriate schema version. # If this
-            # is a new installation, we'll restrict the tool migration messaging.
-            create_or_verify_database(
-                db_url,
-                config_file,
-                self.config.database_engine_options,
-                app=self,
-                map_install_models=combined_install_database,
-            )
-            if not combined_install_database:
-                tsi_create_or_verify_database(install_db_url, install_database_options, app=self)
-
-        self.model = init_models_from_config(
-            self.config,
-            map_install_models=combined_install_database,
-            object_store=self.object_store,
-            trace_logger=getattr(self, "trace_logger", None),
+            self._verify_databases(engine, install_engine, combined_install_database)
+
+        self.model = mapping.configure_model_mapping(
+            self.config.file_path,
+            self.object_store,
+            self.config.use_pbkdf2,
+            engine,
+            combined_install_database,
+            self.config.thread_local_log,
         )
+
         if combined_install_database:
-            log.info("Install database targetting Galaxy's database configuration.")
+            log.info("Install database is combined with the Galaxy database, sharing Galaxy's database configuration.")
             self.install_model = self.model
         else:
-            install_db_url = self.config.install_database_connection
+            self.install_model = install_mapping.configure_model_mapping(install_engine)
             log.info(f"Install database using its own connection {install_db_url}")
-            self.install_model = install_mapping.init(install_db_url, install_database_options)
+
+    def _verify_databases(self, engine, install_engine, combined_install_database):
+        install_template, install_encoding = None, None
+        if not combined_install_database:  # Otherwise these options are not used.
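+            # These mirror the database_template / database_encoding options for
+            # the separate install database; getattr() with a None default
+            # tolerates configs where the install_* options are not defined.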
+ install_template = getattr(self.config, "install_database_template", None) + install_encoding = getattr(self.config, "install_database_encoding", None) + + verify_databases( + engine, + self.config.database_template, + self.config.database_encoding, + install_engine, + install_template, + install_encoding, + self.config.database_auto_migrate, + ) def _configure_signal_handlers(self, handlers): for sig, handler in handlers.items(): @@ -419,7 +440,6 @@ def __init__(self, fsmon=False, **kwargs) -> None: log.debug("python path is: %s", ", ".join(sys.path)) self.name = "galaxy" self.is_webapp = False - self.new_installation = False # Read config file and check for errors self.config: Any = self._register_singleton(config.Configuration, config.Configuration(**kwargs)) self.config.check() diff --git a/lib/galaxy/config/__init__.py b/lib/galaxy/config/__init__.py index 2e4628fe1aa0..88ff52a44349 100644 --- a/lib/galaxy/config/__init__.py +++ b/lib/galaxy/config/__init__.py @@ -774,7 +774,6 @@ def _process_config(self, kwargs): db_path = self._in_data_dir("universe.sqlite") self.database_connection = f"sqlite:///{db_path}?isolation_level=IMMEDIATE" self.database_engine_options = get_database_engine_options(kwargs) - self.database_create_tables = string_as_bool(kwargs.get("database_create_tables", "True")) self.database_encoding = kwargs.get("database_encoding") # Create new databases with this encoding self.thread_local_log = None if self.enable_per_request_sql_debugging: diff --git a/lib/galaxy/dependencies/dev-requirements.txt b/lib/galaxy/dependencies/dev-requirements.txt index 7da9a7dcbad7..0af704426b4a 100644 --- a/lib/galaxy/dependencies/dev-requirements.txt +++ b/lib/galaxy/dependencies/dev-requirements.txt @@ -4,6 +4,7 @@ a2wsgi==1.4.0; python_version >= "3.6" and python_version < "4.0" adal==1.2.7 aiofiles==0.8.0; python_version >= "3.6" and python_version < "4.0" alabaster==0.7.12; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" +alembic==1.7.4; python_version >= "3.6" amqp==5.0.9; python_version >= "3.7" anyio==3.5.0; python_version >= "3.7" and python_full_version >= "3.6.2" appdirs==1.4.4 diff --git a/lib/galaxy/dependencies/pinned-requirements.txt b/lib/galaxy/dependencies/pinned-requirements.txt index 0f92993b0d4f..4f2638f8cbc9 100644 --- a/lib/galaxy/dependencies/pinned-requirements.txt +++ b/lib/galaxy/dependencies/pinned-requirements.txt @@ -3,6 +3,7 @@ a2wsgi==1.4.0; python_version >= "3.6" and python_version < "4.0" adal==1.2.7 aiofiles==0.8.0; python_version >= "3.6" and python_version < "4.0" +alembic==1.7.4; python_version >= "3.6" amqp==5.0.9; python_version >= "3.7" anyio==3.5.0; python_version >= "3.7" and python_full_version >= "3.6.2" appdirs==1.4.4 diff --git a/lib/galaxy/model/database_utils.py b/lib/galaxy/model/database_utils.py index ec0312bce658..fcf5f4a4678f 100644 --- a/lib/galaxy/model/database_utils.py +++ b/lib/galaxy/model/database_utils.py @@ -1,5 +1,6 @@ import sqlite3 from contextlib import contextmanager +from typing import Optional from sqlalchemy import create_engine from sqlalchemy.engine.url import make_url @@ -118,3 +119,14 @@ def create(self, encoding, *arg): stmt = f"CREATE DATABASE {database} CHARACTER SET = '{encoding}'" with engine.connect().execution_options(isolation_level="AUTOCOMMIT") as conn: conn.execute(stmt) + + +def is_one_database(db1_url: str, db2_url: Optional[str]): + """ + Check if the arguments refer to one database. 
This will be true
+    if only one argument is passed, or if the URLs are the same.
+    URLs are strings, so sameness is determined via string comparison.
+    """
+    # TODO: Consider more aggressive check here that this is not the same
+    # database file under the hood.
+    return not (db1_url and db2_url and db1_url != db2_url)
diff --git a/lib/galaxy/model/mapping.py b/lib/galaxy/model/mapping.py
index 21f173476438..298b9daed573 100644
--- a/lib/galaxy/model/mapping.py
+++ b/lib/galaxy/model/mapping.py
@@ -1,9 +1,3 @@
-"""
-This module no longer contains the mapping of data model classes to the
-relational database.
-The module will be revised during migration from SQLAlchemy Migrate to Alembic.
-"""
-
 import logging
 from threading import local
 from typing import (
@@ -15,9 +9,9 @@
 from galaxy.config import GalaxyAppConfiguration
 from galaxy.model import mapper_registry
 from galaxy.model.base import SharedModelMapping
-from galaxy.model.migrate.triggers.update_audit_table import install as install_timestamp_triggers
 from galaxy.model.orm.engine_factory import build_engine
 from galaxy.model.security import GalaxyRBACAgent
+from galaxy.model.triggers.update_audit_table import install as install_timestamp_triggers
 from galaxy.model.view.utils import install_views
 
 log = logging.getLogger(__name__)
@@ -28,7 +22,6 @@ class GalaxyModelMapping(SharedModelMapping):
     security_agent: GalaxyRBACAgent
     thread_local_log: Optional[local]
-    create_tables: bool
 
     User: Type
     GalaxySession: Type
@@ -47,16 +40,7 @@ def init(
     thread_local_log: Optional[local] = None,
     log_query_counts=False,
 ) -> GalaxyModelMapping:
-    """Connect mappings to the database"""
-    if engine_options is None:
-        engine_options = {}
-    # Connect dataset to the file path
-    model.Dataset.file_path = file_path
-    # Connect dataset to object store
-    model.Dataset.object_store = object_store
-    # Use PBKDF2 password hashing?
- model.User.use_pbkdf2 = use_pbkdf2 - # Load the appropriate db module + # Build engine engine = build_engine( url, engine_options, @@ -67,27 +51,53 @@ def init( log_query_counts=log_query_counts, ) + # Create tables if needed + if create_tables: + mapper_registry.metadata.create_all(bind=engine) + create_additional_database_objects(engine) + if map_install_models: + from galaxy.model.tool_shed_install import mapping as install_mapping # noqa: F401 + + install_mapping.create_database_objects(engine) + + # Configure model, build ModelMapping + return configure_model_mapping(file_path, object_store, use_pbkdf2, engine, map_install_models, thread_local_log) + + +def create_additional_database_objects(engine): + install_timestamp_triggers(engine) + install_views(engine) + + +def configure_model_mapping( + file_path, + object_store, + use_pbkdf2, + engine, + map_install_models, + thread_local_log, +): + _configure_model(file_path, object_store, use_pbkdf2) + return _build_model_mapping(engine, map_install_models, thread_local_log) + + +def _configure_model(file_path, object_store, use_pbkdf2): + model.Dataset.file_path = file_path + model.Dataset.object_store = object_store + model.User.use_pbkdf2 = use_pbkdf2 + + +def _build_model_mapping(engine, map_install_models, thread_local_log): model_modules = [model] if map_install_models: - import galaxy.model.tool_shed_install.mapping # noqa: F401 from galaxy.model import tool_shed_install - galaxy.model.tool_shed_install.mapping.init(url=url, engine_options=engine_options, create_tables=create_tables) model_modules.append(tool_shed_install) - result = GalaxyModelMapping(model_modules, engine=engine) - - # Create tables if needed - if create_tables: - metadata.create_all(bind=engine) - install_timestamp_triggers(engine) - install_views(engine) - - result.create_tables = create_tables - # load local galaxy security policy - result.security_agent = GalaxyRBACAgent(result) - result.thread_local_log = thread_local_log - return result + model_mapping = GalaxyModelMapping(model_modules, engine=engine) + model_mapping.security_agent = GalaxyRBACAgent(model_mapping) + model_mapping.thread_local_log = thread_local_log + return model_mapping def init_models_from_config( diff --git a/lib/galaxy/model/migrate/check.py b/lib/galaxy/model/migrate/check.py deleted file mode 100644 index bbdabad360c9..000000000000 --- a/lib/galaxy/model/migrate/check.py +++ /dev/null @@ -1,192 +0,0 @@ -import logging -import os.path -import sys - -from migrate.versioning import ( - repository, - schema, -) -from sqlalchemy import ( - create_engine, - MetaData, - Table, -) -from sqlalchemy.exc import NoSuchTableError - -from galaxy.model import mapping -from galaxy.model.database_utils import ( - create_database, - database_exists, -) - -log = logging.getLogger(__name__) - -# path relative to galaxy -migrate_repository_directory = os.path.abspath(os.path.dirname(__file__)).replace(os.getcwd() + os.path.sep, "", 1) -migrate_repository = repository.Repository(migrate_repository_directory) - - -def create_or_verify_database(url, galaxy_config_file, engine_options=None, app=None, map_install_models=False): - """ - Check that the database is use-able, possibly creating it if empty (this is - the only time we automatically create tables, otherwise we force the - user to do it using the management script so they can create backups). 
- - 1) Empty database --> initialize with latest version and return - 2) Database older than migration support --> fail and require manual update - 3) Database at state where migrate support introduced --> add version control information but make no changes (might still require manual update) - 4) Database versioned but out of date --> fail with informative message, user must run "sh manage_db.sh upgrade" - """ - # Create the base database if it doesn't yet exist. - engine_options = engine_options or {} - new_database = not database_exists(url) - if new_database: - template = app and getattr(app.config, "database_template", None) - encoding = app and getattr(app.config, "database_encoding", None) - create_kwds = {} - - message = f"Creating database for URI [{url}]" - if template: - message += f" from template [{template}]" - create_kwds["template"] = template - if encoding: - message += f" with encoding [{encoding}]" - create_kwds["encoding"] = encoding - log.info(message) - create_database(url, **create_kwds) - - # Create engine and metadata - engine = create_engine(url, **engine_options) - - def migrate(): - try: - # Declare the database to be under a repository's version control - db_schema = schema.ControlledSchema.create(engine, migrate_repository) - except Exception: - # The database is already under version control - db_schema = schema.ControlledSchema(engine, migrate_repository) - # Apply all scripts to get to current version - migrate_to_current_version(engine, db_schema) - - def migrate_from_scratch(): - if not os.environ.get("GALAXY_TEST_FORCE_DATABASE_MIGRATION"): - log.info("Creating new database from scratch, skipping migrations") - current_version = migrate_repository.version().version - mapping.init(file_path="/tmp", url=url, map_install_models=map_install_models, create_tables=True) - schema.ControlledSchema.create(engine, migrate_repository, version=current_version) - db_schema = schema.ControlledSchema(engine, migrate_repository) - assert db_schema.version == current_version - migrate() - if app: - # skips the tool migration process. - app.new_installation = True - - meta = MetaData(bind=engine) - if new_database: - migrate_from_scratch() - return - elif app and getattr(app.config, "database_auto_migrate", False): - migrate() - return - - # Try to load dataset table - try: - Table("dataset", meta, autoload=True) - except NoSuchTableError: - # No 'dataset' table means a completely uninitialized database. - log.info("No database, initializing") - migrate_from_scratch() - return - try: - hda_table = Table("history_dataset_association", meta, autoload=True) - except NoSuchTableError: - raise Exception( - "Your database is older than hg revision 1464:c7acaa1bb88f and will need to be updated manually" - ) - # There is a 'history_dataset_association' table, so we (hopefully) have - # version 1 of the database, but without the migrate_version table. This - # happens if the user has a build from right before migration was added. - # Verify that this is true, if it is any older they'll have to update - # manually - if "copied_from_history_dataset_association_id" not in hda_table.c: - # The 'copied_from_history_dataset_association_id' column was added in - # rev 1464:c7acaa1bb88f. 
This is the oldest revision we currently do - # automated versioning for, so stop here - raise Exception( - "Your database is older than hg revision 1464:c7acaa1bb88f and will need to be updated manually" - ) - # At revision 1464:c7acaa1bb88f or greater (database version 1), make sure - # that the db has version information. This is the trickiest case -- we - # have a database but no version control, and are assuming it is a certain - # version. If the user has postion version 1 changes this could cause - # problems - try: - Table("migrate_version", meta, autoload=True) - except NoSuchTableError: - # The database exists but is not yet under migrate version control, so init with version 1 - log.info("Adding version control to existing database") - try: - Table("metadata_file", meta, autoload=True) - schema.ControlledSchema.create(engine, migrate_repository, version=2) - except NoSuchTableError: - schema.ControlledSchema.create(engine, migrate_repository, version=1) - # Verify that the code and the DB are in sync - db_schema = schema.ControlledSchema(engine, migrate_repository) - if migrate_repository.versions.latest != db_schema.version: - config_arg = "" - if ( - galaxy_config_file - and os.path.abspath(os.path.join(os.getcwd(), "config", "galaxy.ini")) != galaxy_config_file - ): - config_arg = f" -c {galaxy_config_file.replace(os.path.abspath(os.getcwd()), '.')}" - expect_msg = "Your database has version '%d' but this code expects version '%d'" % ( - db_schema.version, - migrate_repository.versions.latest, - ) - instructions = "" - if db_schema.version > migrate_repository.versions.latest: - instructions = "To downgrade the database schema you have to checkout the Galaxy version that you were running previously. " - cmd_msg = "sh manage_db.sh%s downgrade %d" % (config_arg, migrate_repository.versions.latest) - else: - cmd_msg = f"sh manage_db.sh{config_arg} upgrade" - backup_msg = f"Please backup your database and then migrate the database schema by running '{cmd_msg}'." - allow_future_database = os.environ.get("GALAXY_ALLOW_FUTURE_DATABASE", False) - if db_schema.version > migrate_repository.versions.latest and allow_future_database: - log.warning( - "WARNING: Database is from the future, but GALAXY_ALLOW_FUTURE_DATABASE is set, so Galaxy will continue to start." - ) - else: - raise Exception(f"{expect_msg}. {instructions}{backup_msg}") - else: - log.info("At database version %d" % db_schema.version) - - -def migrate_to_current_version(engine, schema): - # Changes to get to current version - try: - changeset = schema.changeset(None) - except Exception as e: - log.error(f"Problem determining migration changeset for engine [{engine}]") - raise e - for ver, change in changeset: - nextver = ver + changeset.step - log.info(f"Migrating {ver} -> {nextver}... ") - old_stdout = sys.stdout - - class FakeStdout: - def __init__(self): - self.buffer = [] - - def write(self, s): - self.buffer.append(s) - - def flush(self): - pass - - sys.stdout = FakeStdout() - try: - schema.runchange(ver, change, changeset.step) - finally: - for message in "".join(sys.stdout.buffer).split("\n"): - log.info(message) - sys.stdout = old_stdout diff --git a/lib/galaxy/model/migrate/migrate.cfg b/lib/galaxy/model/migrate/migrate.cfg deleted file mode 100644 index 3fd7400ff420..000000000000 --- a/lib/galaxy/model/migrate/migrate.cfg +++ /dev/null @@ -1,20 +0,0 @@ -[db_settings] -# Used to identify which repository this database is versioned under. -# You can use the name of your project. 
-repository_id=Galaxy - -# The name of the database table used to track the schema version. -# This name shouldn't already be used by your project. -# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. -version_table=migrate_version - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. -# This must be a list; example: ['postgres','sqlite'] -required_dbs=[] diff --git a/lib/galaxy/model/migrate/versions/0001_initial_tables.py b/lib/galaxy/model/migrate/versions/0001_initial_tables.py deleted file mode 100644 index f9f4fcf116d9..000000000000 --- a/lib/galaxy/model/migrate/versions/0001_initial_tables.py +++ /dev/null @@ -1,283 +0,0 @@ -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Numeric, - String, - Table, - TEXT, -) - -# Need our custom types, but don't import anything else from model -from galaxy.model.custom_types import ( - JSONType, - MetadataType, - TrimmedString, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -# Tables as of changeset 1464:c7acaa1bb88f -User_table = Table( - "galaxy_user", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("email", TrimmedString(255), nullable=False), - Column("password", TrimmedString(40), nullable=False), - Column("external", Boolean, default=False), -) - -History_table = Table( - "history", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, index=True, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("name", TrimmedString(255)), - Column("hid_counter", Integer, default=1), - Column("deleted", Boolean, index=True, default=False), - Column("purged", Boolean, index=True, default=False), - Column("genome_build", TrimmedString(40)), -) - -HistoryDatasetAssociation_table = Table( - "history_dataset_association", - metadata, - Column("id", Integer, primary_key=True), - Column("history_id", Integer, ForeignKey("history.id"), index=True), - Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column( - "copied_from_history_dataset_association_id", - Integer, - ForeignKey("history_dataset_association.id"), - nullable=True, - ), - Column("hid", Integer), - Column("name", TrimmedString(255)), - Column("info", TrimmedString(255)), - Column("blurb", TrimmedString(255)), - Column("peek", TEXT), - Column("extension", TrimmedString(64)), - Column("metadata", MetadataType, key="_metadata"), - Column("parent_id", Integer, ForeignKey("history_dataset_association.id"), nullable=True), - Column("designation", TrimmedString(255)), - Column("deleted", Boolean, index=True, default=False), - Column("visible", Boolean), -) - -Dataset_table = 
Table( - "dataset", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, index=True, default=now, onupdate=now), - Column("state", TrimmedString(64)), - Column("deleted", Boolean, index=True, default=False), - Column("purged", Boolean, index=True, default=False), - Column("purgable", Boolean, default=True), - Column("external_filename", TEXT), - Column("_extra_files_path", TEXT), - Column("file_size", Numeric(15, 0)), -) - -ImplicitlyConvertedDatasetAssociation_table = Table( - "implicitly_converted_dataset_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True, nullable=True), - Column("hda_parent_id", Integer, ForeignKey("history_dataset_association.id"), index=True), - Column("deleted", Boolean, index=True, default=False), - Column("metadata_safe", Boolean, index=True, default=True), - Column("type", TrimmedString(255)), -) - -ValidationError_table = Table( - "validation_error", - metadata, - Column("id", Integer, primary_key=True), - Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True), - Column("message", TrimmedString(255)), - Column("err_type", TrimmedString(64)), - Column("attributes", TEXT), -) - -Job_table = Table( - "job", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("history_id", Integer, ForeignKey("history.id"), index=True), - Column("tool_id", String(255)), - Column("tool_version", TEXT, default="1.0.0"), - Column("state", String(64)), - Column("info", TrimmedString(255)), - Column("command_line", TEXT), - Column("param_filename", String(1024)), - Column("runner_name", String(255)), - Column("stdout", TEXT), - Column("stderr", TEXT), - Column("traceback", TEXT), - Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True, nullable=True), - Column("job_runner_name", String(255)), - Column("job_runner_external_id", String(255)), -) - -JobParameter_table = Table( - "job_parameter", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("name", String(255)), - Column("value", TEXT), -) - -JobToInputDatasetAssociation_table = Table( - "job_to_input_dataset", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True), - Column("name", String(255)), -) - -JobToOutputDatasetAssociation_table = Table( - "job_to_output_dataset", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True), - Column("name", String(255)), -) - -Event_table = Table( - "event", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("history_id", Integer, ForeignKey("history.id"), index=True, nullable=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True), - Column("message", TrimmedString(1024)), - Column("session_id", 
Integer, ForeignKey("galaxy_session.id"), index=True, nullable=True), - Column("tool_id", String(255)), -) - -GalaxySession_table = Table( - "galaxy_session", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True), - Column("remote_host", String(255)), - Column("remote_addr", String(255)), - Column("referer", TEXT), - Column("current_history_id", Integer, ForeignKey("history.id"), nullable=True), - Column("session_key", TrimmedString(255), index=True, unique=True), - Column("is_valid", Boolean, default=False), - Column("prev_session_id", Integer), -) - -GalaxySessionToHistoryAssociation_table = Table( - "galaxy_session_to_history", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True), - Column("history_id", Integer, ForeignKey("history.id"), index=True), -) - -StoredWorkflow_table = Table( - "stored_workflow", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column( - "latest_workflow_id", - Integer, - ForeignKey("workflow.id", use_alter=True, name="stored_workflow_latest_workflow_id_fk"), - index=True, - ), - Column("name", TEXT), - Column("deleted", Boolean, default=False), -) - -Workflow_table = Table( - "workflow", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True, nullable=False), - Column("name", TEXT), - Column("has_cycles", Boolean), - Column("has_errors", Boolean), -) - -WorkflowStep_table = Table( - "workflow_step", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("workflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=False), - Column("type", String(64)), - Column("tool_id", TEXT), - Column("tool_version", TEXT), - Column("tool_inputs", JSONType), - Column("tool_errors", JSONType), - Column("position", JSONType), - Column("config", JSONType), - Column("order_index", Integer), -) - -WorkflowStepConnection_table = Table( - "workflow_step_connection", - metadata, - Column("id", Integer, primary_key=True), - Column("output_step_id", Integer, ForeignKey("workflow_step.id"), index=True), - Column("input_step_id", Integer, ForeignKey("workflow_step.id"), index=True), - Column("output_name", TEXT), - Column("input_name", TEXT), -) - -StoredWorkflowUserShareAssociation_table = Table( - "stored_workflow_user_share_connection", - metadata, - Column("id", Integer, primary_key=True), - Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), -) - -StoredWorkflowMenuEntry_table = Table( - "stored_workflow_menu_entry", - metadata, - Column("id", Integer, primary_key=True), - Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - 
Column("order_index", Integer), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.create_all() diff --git a/lib/galaxy/model/migrate/versions/0002_metadata_file_table.py b/lib/galaxy/model/migrate/versions/0002_metadata_file_table.py deleted file mode 100644 index b61fe4bf5802..000000000000 --- a/lib/galaxy/model/migrate/versions/0002_metadata_file_table.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -""" - -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -# New table in changeset 1568:0b022adfdc34 -MetadataFile_table = Table( - "metadata_file", - metadata, - Column("id", Integer, primary_key=True), - Column("name", TEXT), - Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True, nullable=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, index=True, default=now, onupdate=now), - Column("deleted", Boolean, index=True, default=False), - Column("purged", Boolean, index=True, default=False), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(MetadataFile_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(MetadataFile_table) diff --git a/lib/galaxy/model/migrate/versions/0003_security_and_libraries.py b/lib/galaxy/model/migrate/versions/0003_security_and_libraries.py deleted file mode 100644 index 27b09337bca9..000000000000 --- a/lib/galaxy/model/migrate/versions/0003_security_and_libraries.py +++ /dev/null @@ -1,745 +0,0 @@ -""" -""" - -import datetime -import logging - -from migrate import ForeignKeyConstraint -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Index, - Integer, - MetaData, - String, - Table, - TEXT, -) - -from galaxy.model.custom_types import ( - JSONType, - MetadataType, - TrimmedString, -) -from galaxy.model.migrate.versions.util import ( - add_column, - add_index, - drop_column, - drop_index, - drop_table, - engine_false, - localtimestamp, - nextval, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -# New tables as of changeset 2341:5498ac35eedd -Group_table = Table( - "galaxy_group", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("name", String(255), index=True, unique=True), - Column("deleted", Boolean, index=True, default=False), -) - -UserGroupAssociation_table = Table( - "user_group_association", - metadata, - Column("id", Integer, primary_key=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("group_id", Integer, ForeignKey("galaxy_group.id"), index=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), -) - -UserRoleAssociation_table = Table( - "user_role_association", - metadata, - Column("id", Integer, primary_key=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("role_id", Integer, ForeignKey("role.id"), index=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, 
onupdate=now), -) - -GroupRoleAssociation_table = Table( - "group_role_association", - metadata, - Column("id", Integer, primary_key=True), - Column("group_id", Integer, ForeignKey("galaxy_group.id"), index=True), - Column("role_id", Integer, ForeignKey("role.id"), index=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), -) - -Role_table = Table( - "role", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("name", String(255), index=True, unique=True), - Column("description", TEXT), - Column("type", String(40), index=True), - Column("deleted", Boolean, index=True, default=False), -) - -DatasetPermissions_table = Table( - "dataset_permissions", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("action", TEXT), - Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True), - Column("role_id", Integer, ForeignKey("role.id"), index=True), -) - -LibraryPermissions_table = Table( - "library_permissions", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("action", TEXT), - Column("library_id", Integer, ForeignKey("library.id"), nullable=True, index=True), - Column("role_id", Integer, ForeignKey("role.id"), index=True), -) - -LibraryFolderPermissions_table = Table( - "library_folder_permissions", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("action", TEXT), - Column("library_folder_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True), - Column("role_id", Integer, ForeignKey("role.id"), index=True), -) - -LibraryDatasetPermissions_table = Table( - "library_dataset_permissions", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("action", TEXT), - Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), nullable=True, index=True), - Column("role_id", Integer, ForeignKey("role.id"), index=True), -) - -LibraryDatasetDatasetAssociationPermissions_table = Table( - "library_dataset_dataset_association_permissions", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("action", TEXT), - Column( - "library_dataset_dataset_association_id", - Integer, - ForeignKey("library_dataset_dataset_association.id"), - nullable=True, - ), - Column("role_id", Integer, ForeignKey("role.id"), index=True), -) -Index( - "ix_lddap_library_dataset_dataset_association_id", - LibraryDatasetDatasetAssociationPermissions_table.c.library_dataset_dataset_association_id, -) - -LibraryItemInfoPermissions_table = Table( - "library_item_info_permissions", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("action", TEXT), - Column("library_item_info_id", Integer, ForeignKey("library_item_info.id"), nullable=True, index=True), - Column("role_id", Integer, ForeignKey("role.id"), 
index=True), -) - -LibraryItemInfoTemplatePermissions_table = Table( - "library_item_info_template_permissions", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("action", TEXT), - Column("library_item_info_template_id", Integer, ForeignKey("library_item_info_template.id"), nullable=True), - Column("role_id", Integer, ForeignKey("role.id"), index=True), -) -Index( - "ix_liitp_library_item_info_template_id", LibraryItemInfoTemplatePermissions_table.c.library_item_info_template_id -) - -DefaultUserPermissions_table = Table( - "default_user_permissions", - metadata, - Column("id", Integer, primary_key=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("action", TEXT), - Column("role_id", Integer, ForeignKey("role.id"), index=True), -) - -DefaultHistoryPermissions_table = Table( - "default_history_permissions", - metadata, - Column("id", Integer, primary_key=True), - Column("history_id", Integer, ForeignKey("history.id"), index=True), - Column("action", TEXT), - Column("role_id", Integer, ForeignKey("role.id"), index=True), -) - -LibraryDataset_table = Table( - "library_dataset", - metadata, - Column("id", Integer, primary_key=True), - Column( - "library_dataset_dataset_association_id", - Integer, - ForeignKey( - "library_dataset_dataset_association.id", use_alter=True, name="library_dataset_dataset_association_id_fk" - ), - nullable=True, - index=True, - ), # current version of dataset, if null, there is not a current version selected - Column("folder_id", Integer, ForeignKey("library_folder.id"), index=True), - Column("order_id", Integer), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column( - "name", TrimmedString(255), key="_name" - ), # when not None/null this will supercede display in library (but not when imported into user's history?) - Column( - "info", TrimmedString(255), key="_info" - ), # when not None/null this will supercede display in library (but not when imported into user's history?) 
- Column("deleted", Boolean, index=True, default=False), -) - -LibraryDatasetDatasetAssociation_table = Table( - "library_dataset_dataset_association", - metadata, - Column("id", Integer, primary_key=True), - Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), index=True), - Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column( - "copied_from_history_dataset_association_id", - Integer, - ForeignKey( - "history_dataset_association.id", use_alter=True, name="history_dataset_association_dataset_id_fkey" - ), - nullable=True, - ), - Column( - "copied_from_library_dataset_dataset_association_id", - Integer, - ForeignKey( - "library_dataset_dataset_association.id", use_alter=True, name="library_dataset_dataset_association_id_fkey" - ), - nullable=True, - ), - Column("name", TrimmedString(255)), - Column("info", TrimmedString(255)), - Column("blurb", TrimmedString(255)), - Column("peek", TEXT), - Column("extension", TrimmedString(64)), - Column("metadata", MetadataType, key="_metadata"), - Column("parent_id", Integer, ForeignKey("library_dataset_dataset_association.id"), nullable=True), - Column("designation", TrimmedString(255)), - Column("deleted", Boolean, index=True, default=False), - Column("visible", Boolean), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("message", TrimmedString(255)), -) - -Library_table = Table( - "library", - metadata, - Column("id", Integer, primary_key=True), - Column("root_folder_id", Integer, ForeignKey("library_folder.id"), index=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("name", String(255), index=True), - Column("deleted", Boolean, index=True, default=False), - Column("purged", Boolean, index=True, default=False), - Column("description", TEXT), -) - -LibraryFolder_table = Table( - "library_folder", - metadata, - Column("id", Integer, primary_key=True), - Column("parent_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("name", TEXT), - Column("description", TEXT), - Column("order_id", Integer), - Column("item_count", Integer), - Column("deleted", Boolean, index=True, default=False), - Column("purged", Boolean, index=True, default=False), - Column("genome_build", TrimmedString(40)), -) - -LibraryItemInfoTemplateElement_table = Table( - "library_item_info_template_element", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("optional", Boolean, index=True, default=True), - Column("deleted", Boolean, index=True, default=False), - Column("name", TEXT), - Column("description", TEXT), - Column("type", TEXT, default="string"), - Column("order_id", Integer), - Column("options", JSONType), - Column("library_item_info_template_id", Integer, ForeignKey("library_item_info_template.id")), -) -Index("ix_liite_library_item_info_template_id", LibraryItemInfoTemplateElement_table.c.library_item_info_template_id) - -LibraryItemInfoTemplate_table = Table( - "library_item_info_template", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - 
Column("optional", Boolean, index=True, default=True), - Column("deleted", Boolean, index=True, default=False), - Column("name", TEXT), - Column("description", TEXT), - Column("item_count", Integer, default=0), -) - -LibraryInfoTemplateAssociation_table = Table( - "library_info_template_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("library_id", Integer, ForeignKey("library.id"), nullable=True, index=True), - Column("library_item_info_template_id", Integer, ForeignKey("library_item_info_template.id")), -) -Index("ix_lita_library_item_info_template_id", LibraryInfoTemplateAssociation_table.c.library_item_info_template_id) - -LibraryFolderInfoTemplateAssociation_table = Table( - "library_folder_info_template_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("library_folder_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True), - Column("library_item_info_template_id", Integer, ForeignKey("library_item_info_template.id")), -) -Index( - "ix_lfita_library_item_info_template_id", LibraryFolderInfoTemplateAssociation_table.c.library_item_info_template_id -) - -LibraryDatasetInfoTemplateAssociation_table = Table( - "library_dataset_info_template_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), nullable=True, index=True), - Column("library_item_info_template_id", Integer, ForeignKey("library_item_info_template.id")), -) -Index( - "ix_ldita_library_item_info_template_id", - LibraryDatasetInfoTemplateAssociation_table.c.library_item_info_template_id, -) - -LibraryDatasetDatasetInfoTemplateAssociation_table = Table( - "library_dataset_dataset_info_template_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column( - "library_dataset_dataset_association_id", - Integer, - ForeignKey("library_dataset_dataset_association.id"), - nullable=True, - ), - Column("library_item_info_template_id", Integer, ForeignKey("library_item_info_template.id")), -) -Index( - "ix_lddita_library_dataset_dataset_association_id", - LibraryDatasetDatasetInfoTemplateAssociation_table.c.library_dataset_dataset_association_id, -) -Index( - "ix_lddita_library_item_info_template_id", - LibraryDatasetDatasetInfoTemplateAssociation_table.c.library_item_info_template_id, -) - -LibraryItemInfoElement_table = Table( - "library_item_info_element", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("contents", JSONType), - Column("library_item_info_id", Integer, ForeignKey("library_item_info.id"), index=True), - Column("library_item_info_template_element_id", Integer, ForeignKey("library_item_info_template_element.id")), -) -Index( - "ix_liie_library_item_info_template_element_id", - LibraryItemInfoElement_table.c.library_item_info_template_element_id, -) - -LibraryItemInfo_table = Table( - "library_item_info", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, 
default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("deleted", Boolean, index=True, default=False), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), nullable=True, index=True), - Column( - "library_item_info_template_id", Integer, ForeignKey("library_item_info_template.id"), nullable=True, index=True - ), -) - -LibraryInfoAssociation_table = Table( - "library_info_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("library_id", Integer, ForeignKey("library.id"), nullable=True, index=True), - Column("library_item_info_id", Integer, ForeignKey("library_item_info.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), nullable=True, index=True), -) - -LibraryFolderInfoAssociation_table = Table( - "library_folder_info_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("library_folder_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True), - Column("library_item_info_id", Integer, ForeignKey("library_item_info.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), nullable=True, index=True), -) - -LibraryDatasetInfoAssociation_table = Table( - "library_dataset_info_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), nullable=True, index=True), - Column("library_item_info_id", Integer, ForeignKey("library_item_info.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), nullable=True, index=True), -) - -LibraryDatasetDatasetInfoAssociation_table = Table( - "library_dataset_dataset_info_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column( - "library_dataset_dataset_association_id", - Integer, - ForeignKey("library_dataset_dataset_association.id"), - nullable=True, - ), - Column("library_item_info_id", Integer, ForeignKey("library_item_info.id")), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), nullable=True, index=True), -) -Index( - "ix_lddia_library_dataset_dataset_association_id", - LibraryDatasetDatasetInfoAssociation_table.c.library_dataset_dataset_association_id, -) -Index("ix_lddia_library_item_info_id", LibraryDatasetDatasetInfoAssociation_table.c.library_item_info_id) - -JobExternalOutputMetadata_table = Table( - "job_external_output_metadata", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column( - "history_dataset_association_id", - Integer, - ForeignKey("history_dataset_association.id"), - index=True, - nullable=True, - ), - Column( - "library_dataset_dataset_association_id", - Integer, - ForeignKey("library_dataset_dataset_association.id"), - nullable=True, - ), - Column("filename_in", String(255)), - Column("filename_out", String(255)), - Column("filename_results_code", String(255)), - Column("filename_kwds", String(255)), - Column("job_runner_external_pid", String(255)), -) -Index( - "ix_jeom_library_dataset_dataset_association_id", - 
JobExternalOutputMetadata_table.c.library_dataset_dataset_association_id, -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Add 2 new columns to the galaxy_user table - User_table = Table("galaxy_user", metadata, autoload=True) - col = Column("deleted", Boolean, index=True, default=False) - add_column(col, User_table, metadata, index_name="ix_galaxy_user_deleted") - col = Column("purged", Boolean, index=True, default=False) - add_column(col, User_table, metadata, index_name="ix_galaxy_user_purged") - # Add 1 new column to the history_dataset_association table - HistoryDatasetAssociation_table = Table("history_dataset_association", metadata, autoload=True) - col = Column("copied_from_library_dataset_dataset_association_id", Integer, nullable=True) - add_column(col, HistoryDatasetAssociation_table, metadata) - # Add 1 new column to the metadata_file table - MetadataFile_table = Table("metadata_file", metadata, autoload=True) - col = Column("lda_id", Integer, index=True, nullable=True) - add_column(col, MetadataFile_table, metadata, index_name="ix_metadata_file_lda_id") - # Add 1 new column to the stored_workflow table - changeset 2328 - StoredWorkflow_table = Table( - "stored_workflow", - metadata, - Column( - "latest_workflow_id", - Integer, - ForeignKey("workflow.id", use_alter=True, name="stored_workflow_latest_workflow_id_fk"), - index=True, - ), - autoload=True, - extend_existing=True, - ) - col = Column("importable", Boolean, default=False) - add_column(col, StoredWorkflow_table, metadata) - # Create an index on the Job.state column - changeset 2192 - add_index("ix_job_state", "job", "state", metadata) - # Add all of the new tables above - metadata.create_all() - # Add 1 foreign key constraint to the history_dataset_association table - LibraryDatasetDatasetAssociation_table = Table("library_dataset_dataset_association", metadata, autoload=True) - try: - cons = ForeignKeyConstraint( - [HistoryDatasetAssociation_table.c.copied_from_library_dataset_dataset_association_id], - [LibraryDatasetDatasetAssociation_table.c.id], - name="history_dataset_association_copied_from_library_dataset_da_fkey", - ) - # Create the constraint - cons.create() - except Exception: - log.exception( - "Adding foreign key constraint 'history_dataset_association_copied_from_library_dataset_da_fkey' to table 'history_dataset_association' failed." - ) - # Add 1 foreign key constraint to the metadata_file table - LibraryDatasetDatasetAssociation_table = Table("library_dataset_dataset_association", metadata, autoload=True) - if migrate_engine.name != "sqlite": - # Sqlite can't alter table add foreign key. 
- try: - cons = ForeignKeyConstraint( - [MetadataFile_table.c.lda_id], - [LibraryDatasetDatasetAssociation_table.c.id], - name="metadata_file_lda_id_fkey", - ) - # Create the constraint - cons.create() - except Exception: - log.exception("Adding foreign key constraint 'metadata_file_lda_id_fkey' to table 'metadata_file' failed.") - # Make sure we have at least 1 user - cmd = "SELECT * FROM galaxy_user;" - users = migrate_engine.execute(cmd).fetchall() - if users: - cmd = "SELECT * FROM role;" - roles = migrate_engine.execute(cmd).fetchall() - if not roles: - # Create private roles for each user - pass 1 - cmd = ( - "INSERT INTO role " - + "SELECT %s AS id," - + "%s AS create_time," - + "%s AS update_time," - + "email AS name," - + "email AS description," - + "'private' As type," - + "%s AS deleted " - + "FROM galaxy_user " - + "ORDER BY id;" - ) - cmd = cmd % ( - nextval(migrate_engine, "role"), - localtimestamp(migrate_engine), - localtimestamp(migrate_engine), - engine_false(migrate_engine), - ) - migrate_engine.execute(cmd) - # Create private roles for each user - pass 2 - if migrate_engine.name in ["postgres", "postgresql", "sqlite"]: - cmd = "UPDATE role SET description = 'Private role for ' || description;" - elif migrate_engine.name == "mysql": - cmd = "UPDATE role SET description = CONCAT( 'Private role for ', description );" - migrate_engine.execute(cmd) - # Create private roles for each user - pass 3 - cmd = ( - "INSERT INTO user_role_association " - + "SELECT %s AS id," - + "galaxy_user.id AS user_id," - + "role.id AS role_id," - + "%s AS create_time," - + "%s AS update_time " - + "FROM galaxy_user, role " - + "WHERE galaxy_user.email = role.name " - + "ORDER BY galaxy_user.id;" - ) - cmd = cmd % ( - nextval(migrate_engine, "user_role_association"), - localtimestamp(migrate_engine), - localtimestamp(migrate_engine), - ) - migrate_engine.execute(cmd) - # Create default permissions for each user - cmd = ( - "INSERT INTO default_user_permissions " - + "SELECT %s AS id," - + "galaxy_user.id AS user_id," - + "'manage permissions' AS action," - + "user_role_association.role_id AS role_id " - + "FROM galaxy_user " - + "JOIN user_role_association ON user_role_association.user_id = galaxy_user.id " - + "ORDER BY galaxy_user.id;" - ) - cmd = cmd % nextval(migrate_engine, "default_user_permissions") - migrate_engine.execute(cmd) - # Create default history permissions for each active history associated with a user - - cmd = ( - "INSERT INTO default_history_permissions " - + "SELECT %s AS id," - + "history.id AS history_id," - + "'manage permissions' AS action," - + "user_role_association.role_id AS role_id " - + "FROM history " - + "JOIN user_role_association ON user_role_association.user_id = history.user_id " - + "WHERE history.purged = %s AND history.user_id IS NOT NULL;" - ) - cmd = cmd % (nextval(migrate_engine, "default_history_permissions"), engine_false(migrate_engine)) - migrate_engine.execute(cmd) - # Create "manage permissions" dataset_permissions for all activate-able datasets - cmd = ( - "INSERT INTO dataset_permissions " - + "SELECT %s AS id," - + "%s AS create_time," - + "%s AS update_time," - + "'manage permissions' AS action," - + "history_dataset_association.dataset_id AS dataset_id," - + "user_role_association.role_id AS role_id " - + "FROM history " - + "JOIN history_dataset_association ON history_dataset_association.history_id = history.id " - + "JOIN dataset ON history_dataset_association.dataset_id = dataset.id " - + "JOIN user_role_association ON 
user_role_association.user_id = history.user_id " - + "WHERE dataset.purged = %s AND history.user_id IS NOT NULL;" - ) - cmd = cmd % ( - nextval(migrate_engine, "dataset_permissions"), - localtimestamp(migrate_engine), - localtimestamp(migrate_engine), - engine_false(migrate_engine), - ) - migrate_engine.execute(cmd) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # NOTE: all new data added in the upgrade method is eliminated here via table drops - # Drop 1 foreign key constraint from the metadata_file table - MetadataFile_table = Table("metadata_file", metadata, autoload=True) - LibraryDatasetDatasetAssociation_table = Table("library_dataset_dataset_association", metadata, autoload=True) - try: - cons = ForeignKeyConstraint( - [MetadataFile_table.c.lda_id], - [LibraryDatasetDatasetAssociation_table.c.id], - name="metadata_file_lda_id_fkey", - ) - # Drop the constraint - cons.drop() - except Exception: - log.exception("Dropping foreign key constraint 'metadata_file_lda_id_fkey' from table 'metadata_file' failed.") - # Drop 1 foreign key constraint from the history_dataset_association table - HistoryDatasetAssociation_table = Table("history_dataset_association", metadata, autoload=True) - LibraryDatasetDatasetAssociation_table = Table("library_dataset_dataset_association", metadata, autoload=True) - try: - cons = ForeignKeyConstraint( - [HistoryDatasetAssociation_table.c.copied_from_library_dataset_dataset_association_id], - [LibraryDatasetDatasetAssociation_table.c.id], - name="history_dataset_association_copied_from_library_dataset_da_fkey", - ) - # Drop the constraint - cons.drop() - except Exception: - log.exception( - "Dropping foreign key constraint 'history_dataset_association_copied_from_library_dataset_da_fkey' from table 'history_dataset_association' failed." 
- ) - # Drop all of the new tables above - TABLES = [ - UserGroupAssociation_table, - UserRoleAssociation_table, - GroupRoleAssociation_table, - Group_table, - DatasetPermissions_table, - LibraryPermissions_table, - LibraryFolderPermissions_table, - LibraryDatasetPermissions_table, - LibraryDatasetDatasetAssociationPermissions_table, - LibraryItemInfoPermissions_table, - LibraryItemInfoTemplatePermissions_table, - DefaultUserPermissions_table, - DefaultHistoryPermissions_table, - Role_table, - LibraryDatasetDatasetInfoAssociation_table, - LibraryDataset_table, - LibraryDatasetDatasetAssociation_table, - LibraryDatasetDatasetInfoTemplateAssociation_table, - JobExternalOutputMetadata_table, - Library_table, - LibraryFolder_table, - LibraryItemInfoTemplateElement_table, - LibraryInfoTemplateAssociation_table, - LibraryFolderInfoTemplateAssociation_table, - LibraryDatasetInfoTemplateAssociation_table, - LibraryInfoAssociation_table, - LibraryFolderInfoAssociation_table, - LibraryDatasetInfoAssociation_table, - LibraryItemInfoElement_table, - LibraryItemInfo_table, - LibraryItemInfoTemplate_table, - ] - for table in TABLES: - drop_table(table) - # Drop the index on the Job.state column - changeset 2192 - drop_index("ix_job_state", "job", "state", metadata) - # Drop 1 column from the stored_workflow table - changeset 2328 - drop_column("importable", "stored_workflow", metadata) - # Drop 1 column from the metadata_file table - drop_column("lda_id", "metadata_file", metadata) - # Drop 1 column from the history_dataset_association table - drop_column("copied_from_library_dataset_dataset_association_id", HistoryDatasetAssociation_table) - # Drop 2 columns from the galaxy_user table - User_table = Table("galaxy_user", metadata, autoload=True) - drop_column("deleted", User_table) - drop_column("purged", User_table) diff --git a/lib/galaxy/model/migrate/versions/0004_indexes_and_defaults.py b/lib/galaxy/model/migrate/versions/0004_indexes_and_defaults.py deleted file mode 100644 index a3c3d72e17bd..000000000000 --- a/lib/galaxy/model/migrate/versions/0004_indexes_and_defaults.py +++ /dev/null @@ -1,50 +0,0 @@ -""" -""" - -import logging - -from sqlalchemy import ( - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - add_index, - engine_false, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - User_table = Table("galaxy_user", metadata, autoload=True) - # The next add_index() calls are not needed any more after commit - # 7ee93c0995123b0f357abd649326295dfa06766c , but harmless - add_index("ix_galaxy_user_deleted", User_table, "deleted") - add_index("ix_galaxy_user_purged", User_table, "purged") - # Set the default data in the galaxy_user table, but only for null values - cmd = f"UPDATE galaxy_user SET deleted = {engine_false(migrate_engine)} WHERE deleted is null" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("Setting default data for galaxy_user.deleted column failed.") - cmd = f"UPDATE galaxy_user SET purged = {engine_false(migrate_engine)} WHERE purged is null" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("Setting default data for galaxy_user.purged column failed.") - add_index( - "ix_hda_copied_from_library_dataset_dataset_association_id", - "history_dataset_association", - "copied_from_library_dataset_dataset_association_id", - metadata, - ) - - -def downgrade(migrate_engine): - pass diff --git 
a/lib/galaxy/model/migrate/versions/0005_cleanup_datasets_fix.py b/lib/galaxy/model/migrate/versions/0005_cleanup_datasets_fix.py deleted file mode 100644 index 82ef1ddc832d..000000000000 --- a/lib/galaxy/model/migrate/versions/0005_cleanup_datasets_fix.py +++ /dev/null @@ -1,9 +0,0 @@ -"""No-op cleanup for existing datasets.""" - - -def upgrade(migrate_engine): - print(__doc__) - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0006_change_qual_datatype.py b/lib/galaxy/model/migrate/versions/0006_change_qual_datatype.py deleted file mode 100644 index d73bb40c1d22..000000000000 --- a/lib/galaxy/model/migrate/versions/0006_change_qual_datatype.py +++ /dev/null @@ -1,33 +0,0 @@ -""" -This migration script changes certain values in the history_dataset_association.extension -column: 'qual' becomes either 'qual454' or 'qualsolexa', depending on the dataset's peek. -""" - -import logging - -from sqlalchemy import MetaData - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Update the extension of existing 'qual' datasets: 'qual454' if the peek looks FASTA-like, 'qualsolexa' otherwise - cmd = "UPDATE history_dataset_association SET extension = 'qual454' WHERE extension = 'qual' and peek like '>%%'" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("Resetting extension qual to qual454 in history_dataset_association failed.") - cmd = "UPDATE history_dataset_association SET extension = 'qualsolexa' WHERE extension = 'qual' and peek not like '>%%'" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("Resetting extension qual to qualsolexa in history_dataset_association failed.") - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0007_sharing_histories.py b/lib/galaxy/model/migrate/versions/0007_sharing_histories.py deleted file mode 100644 index fdfd555a57cc..000000000000 --- a/lib/galaxy/model/migrate/versions/0007_sharing_histories.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -This migration script creates the new history_user_share_association table, and adds -a new boolean type column to the history table. This provides support for sharing -histories in the same way that workflows are shared. 
-""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -HistoryUserShareAssociation_table = Table( - "history_user_share_association", - metadata, - Column("id", Integer, primary_key=True), - Column("history_id", Integer, ForeignKey("history.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(HistoryUserShareAssociation_table) - col = Column("importable", Boolean, index=True, default=False) - add_column(col, "history", metadata, index_name="ix_history_importable") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("importable", "history", metadata) - drop_table(HistoryUserShareAssociation_table) diff --git a/lib/galaxy/model/migrate/versions/0008_galaxy_forms.py b/lib/galaxy/model/migrate/versions/0008_galaxy_forms.py deleted file mode 100644 index f7b3fb914870..000000000000 --- a/lib/galaxy/model/migrate/versions/0008_galaxy_forms.py +++ /dev/null @@ -1,166 +0,0 @@ -""" -This migration script adds the following new tables for supporting Galaxy forms: -1) form_definition_current -2) form_definition -3) form_values -4) request_type -5) request -6) sample -7) sample_state -8) sample_event -""" - -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.custom_types import ( - JSONType, - TrimmedString, -) -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -FormDefinition_table = Table( - "form_definition", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("name", TrimmedString(255), nullable=False), - Column("desc", TEXT), - Column( - "form_definition_current_id", - Integer, - ForeignKey("form_definition_current.id", use_alter=True), - index=True, - nullable=False, - ), - Column("fields", JSONType), -) - -FormDefinitionCurrent_table = Table( - "form_definition_current", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("latest_form_id", Integer, ForeignKey("form_definition.id"), index=True), - Column("deleted", Boolean, index=True, default=False), -) - -FormValues_table = Table( - "form_values", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True), - Column("content", JSONType), -) - -RequestType_table = Table( - "request_type", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("name", TrimmedString(255), nullable=False), - Column("desc", TEXT), - Column("request_form_id", Integer, ForeignKey("form_definition.id"), index=True), - 
Column("sample_form_id", Integer, ForeignKey("form_definition.id"), index=True), -) - -Request_table = Table( - "request", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("name", TrimmedString(255), nullable=False), - Column("desc", TEXT), - Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True), - Column("request_type_id", Integer, ForeignKey("request_type.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("library_id", Integer, ForeignKey("library.id"), index=True), - Column("deleted", Boolean, index=True, default=False), -) - -Sample_table = Table( - "sample", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("name", TrimmedString(255), nullable=False), - Column("desc", TEXT), - Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True), - Column("request_id", Integer, ForeignKey("request.id"), index=True), - Column("deleted", Boolean, index=True, default=False), -) - -SampleState_table = Table( - "sample_state", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("name", TrimmedString(255), nullable=False), - Column("desc", TEXT), - Column("request_type_id", Integer, ForeignKey("request_type.id"), index=True), -) - -SampleEvent_table = Table( - "sample_event", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("sample_id", Integer, ForeignKey("sample.id"), index=True), - Column("sample_state_id", Integer, ForeignKey("sample_state.id"), index=True), - Column("comment", TEXT), -) - -TABLES = [ - FormDefinition_table, - FormDefinitionCurrent_table, - FormValues_table, - RequestType_table, - Request_table, - Sample_table, - SampleState_table, - SampleEvent_table, -] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for table in TABLES: - create_table(table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - for table in reversed(TABLES): - drop_table(table) diff --git a/lib/galaxy/model/migrate/versions/0009_request_table.py b/lib/galaxy/model/migrate/versions/0009_request_table.py deleted file mode 100644 index 2c26a82b27a5..000000000000 --- a/lib/galaxy/model/migrate/versions/0009_request_table.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -This migration script adds a new column to 2 tables: -1) a new boolean type column named 'submitted' to the 'request' table -2) a new string type column named 'bar_code' to the 'sample' table -""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - col = Column("submitted", Boolean, default=False) - add_column(col, "request", metadata) - - col = Column("bar_code", TrimmedString(255), index=True) - add_column(col, "sample", metadata, 
index_name="ix_sample_bar_code") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("bar_code", "sample", metadata) - # SQLAlchemy Migrate has a bug when dropping a boolean column in SQLite - if migrate_engine.name != "sqlite": - drop_column("submitted", "request", metadata) diff --git a/lib/galaxy/model/migrate/versions/0010_hda_display_at_authz_table.py b/lib/galaxy/model/migrate/versions/0010_hda_display_at_authz_table.py deleted file mode 100644 index 8f3675a2e102..000000000000 --- a/lib/galaxy/model/migrate/versions/0010_hda_display_at_authz_table.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -This migration script adds the history_dataset_association_display_at_authorization table, -which allows 'private' datasets to be displayed at external sites without making them public. - -If using mysql, this script will display the following error, which is corrected in the next -migration script: -history_dataset_association_display_at_authorization table failed: (OperationalError) -(1059, "Identifier name 'ix_history_dataset_association_display_at_authorization_update_time' -is too long -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import TrimmedString - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -HistoryDatasetAssociationDisplayAtAuthorization_table = Table( - "history_dataset_association_display_at_authorization", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, index=True, default=now, onupdate=now), - Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("site", TrimmedString(255)), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - try: - HistoryDatasetAssociationDisplayAtAuthorization_table.create() - except Exception: - log.exception("Creating history_dataset_association_display_at_authorization table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - # Load existing tables - metadata.reflect() - try: - HistoryDatasetAssociationDisplayAtAuthorization_table.drop() - except Exception: - log.exception("Dropping history_dataset_association_display_at_authorization table failed.") diff --git a/lib/galaxy/model/migrate/versions/0011_v0010_mysql_index_fix.py b/lib/galaxy/model/migrate/versions/0011_v0010_mysql_index_fix.py deleted file mode 100644 index 397c878a35fb..000000000000 --- a/lib/galaxy/model/migrate/versions/0011_v0010_mysql_index_fix.py +++ /dev/null @@ -1,47 +0,0 @@ -""" -This script fixes a problem introduced in the previous migration script -0010_hda_display_at_authz_table.py . MySQL has a name length limit and -thus the index "ix_hdadaa_history_dataset_association_id" has to be -manually created. 
-""" - -import datetime -import logging - -from sqlalchemy import MetaData - -from galaxy.model.migrate.versions.util import ( - add_index, - drop_index, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - if migrate_engine.name == "mysql": - add_index( - "ix_hdadaa_history_dataset_association_id", - "history_dataset_association_display_at_authorization", - "history_dataset_association_id", - metadata, - ) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - if migrate_engine.name == "mysql": - drop_index( - "ix_hdadaa_history_dataset_association_id", - "history_dataset_association_display_at_authorization", - "history_dataset_association_id", - metadata, - ) diff --git a/lib/galaxy/model/migrate/versions/0012_user_address.py b/lib/galaxy/model/migrate/versions/0012_user_address.py deleted file mode 100644 index 06bda287d470..000000000000 --- a/lib/galaxy/model/migrate/versions/0012_user_address.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -This script adds a new user_address table that is currently only used with sample requests, where -a user can select from a list of his addresses to associate with the request. This script also -drops the request.submitted column which was boolean and replaces it with a request.state column -which is a string, allowing for more flexibility with request states. -""" - -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -UserAddress_table = Table( - "user_address", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("desc", TEXT), - Column("name", TrimmedString(255), nullable=False), - Column("institution", TrimmedString(255)), - Column("address", TrimmedString(255), nullable=False), - Column("city", TrimmedString(255), nullable=False), - Column("state", TrimmedString(255), nullable=False), - Column("postal_code", TrimmedString(255), nullable=False), - Column("country", TrimmedString(255), nullable=False), - Column("phone", TrimmedString(255)), - Column("deleted", Boolean, index=True, default=False), - Column("purged", Boolean, index=True, default=False), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Add all of the new tables above - create_table(UserAddress_table) - # Add 1 column to the request_type table - col = Column("deleted", Boolean, index=True, default=False) - add_column(col, "request_type", metadata, index_name="ix_request_type_deleted") - - # Delete the submitted column - # This fails for sqlite, so skip the drop -- no conflicts in the future - Request_table = Table("request", metadata, autoload=True) - # SQLAlchemy Migrate has a bug when dropping a boolean column in SQLite - if migrate_engine.name != "sqlite": - drop_column("submitted", Request_table) - col = Column("state", TrimmedString(255), index=True) - add_column(col, Request_table, metadata, 
index_name="ix_request_state") - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0013_change_lib_item_templates_to_forms.py b/lib/galaxy/model/migrate/versions/0013_change_lib_item_templates_to_forms.py deleted file mode 100644 index 57b6440e5735..000000000000 --- a/lib/galaxy/model/migrate/versions/0013_change_lib_item_templates_to_forms.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -This migration script eliminates all of the tables that were used for the 1st version of the -library templates where template fields and contents were each stored as a separate table row -in various library item tables. All of these tables are dropped in this script, eliminating all -existing template data. A total of 14 existing tables are dropped. - -We're now basing library templates on forms, so field contents are -stored as a jsonified list in the form_values table. This script introduces the following 3 -new association tables: -1) library_info_association -2) library_folder_info_association -3) library_dataset_dataset_info_association -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -OLD_TABLE_NAMES = [ - "library_item_info_permissions", - "library_item_info_template_permissions", - "library_item_info_element", - "library_item_info_template_element", - "library_info_template_association", - "library_folder_info_template_association", - "library_dataset_info_template_association", - "library_dataset_dataset_info_template_association", - "library_info_association", - "library_folder_info_association", - "library_dataset_info_association", - "library_dataset_dataset_info_association", - "library_item_info", - "library_item_info_template", -] - -LibraryInfoAssociation_table = Table( - "library_info_association", - metadata, - Column("id", Integer, primary_key=True), - Column("library_id", Integer, ForeignKey("library.id"), index=True), - Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True), - Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True), -) - -LibraryFolderInfoAssociation_table = Table( - "library_folder_info_association", - metadata, - Column("id", Integer, primary_key=True), - Column("library_folder_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True), - Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True), - Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True), -) - -LibraryDatasetDatasetInfoAssociation_table = Table( - "library_dataset_dataset_info_association", - metadata, - Column("id", Integer, primary_key=True), - Column( - "library_dataset_dataset_association_id", - Integer, - ForeignKey("library_dataset_dataset_association.id"), - nullable=True, - index=True, - ), - Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True), - Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True), -) - -NEW_TABLES = [ - LibraryInfoAssociation_table, - LibraryFolderInfoAssociation_table, - LibraryDatasetDatasetInfoAssociation_table, -] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Drop all of the original library_item_info tables - # NOTE: all existing library item into template data is eliminated here via table drops - for 
table_name in OLD_TABLE_NAMES: - drop_table(table_name, metadata) - - # Create all new tables above - for table in NEW_TABLES: - create_table(table) - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0014_pages.py b/lib/galaxy/model/migrate/versions/0014_pages.py deleted file mode 100644 index dd454ae228b3..000000000000 --- a/lib/galaxy/model/migrate/versions/0014_pages.py +++ /dev/null @@ -1,81 +0,0 @@ -""" -Migration script to add support for "Pages". - 1) Creates Page and PageRevision tables - 2) Adds username column to User table -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Index, - Integer, - MetaData, - String, - Table, - TEXT, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, - drop_table, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -Page_table = Table( - "page", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column( - "latest_revision_id", - Integer, - ForeignKey("page_revision.id", use_alter=True, name="page_latest_revision_id_fk"), - index=True, - ), - Column("title", TEXT), - Column("slug", TEXT), - Index("ix_page_slug", "slug", unique=True, mysql_length=200), -) - -PageRevision_table = Table( - "page_revision", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("page_id", Integer, ForeignKey("page.id"), index=True, nullable=False), - Column("title", TEXT), - Column("content", TEXT), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(Page_table) - create_table(PageRevision_table) - - col = Column("username", String(255), index=True, unique=True, default=False) - add_column(col, "galaxy_user", metadata, index_name="ix_galaxy_user_username", unique_name="username") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("username", "galaxy_user", metadata) - drop_table(PageRevision_table) - drop_table(Page_table) diff --git a/lib/galaxy/model/migrate/versions/0015_tagging.py b/lib/galaxy/model/migrate/versions/0015_tagging.py deleted file mode 100644 index 835635bea03a..000000000000 --- a/lib/galaxy/model/migrate/versions/0015_tagging.py +++ /dev/null @@ -1,86 +0,0 @@ -""" -This migration script adds the tables necessary to support tagging of histories, -datasets, and history-dataset associations (user views of datasets). -""" - -import logging - -from migrate import UniqueConstraint -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -# New tables to support tagging of histories, datasets, and history-dataset associations. 
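Each *_tag_association row keeps both the raw, user-typed tag and a normalized, indexed form, which is why the (user_tname, value, user_value) triple recurs in every table below. A rough sketch of the idea only; Galaxy's actual parser lives in galaxy.model.tags and differs in detail:

def parse_tag(raw):
    # "Condition:Treated" -> ("condition", "treated", "Condition", "Treated");
    # bare tags such as "rnaseq" have no value part
    user_tname, sep, user_value = raw.partition(":")
    value = user_value.lower() if sep else None
    return user_tname.lower(), value, user_tname, user_value if sep else None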
-Tag_table = Table( - "tag", - metadata, - Column("id", Integer, primary_key=True), - Column("type", Integer), - Column("parent_id", Integer, ForeignKey("tag.id")), - Column("name", TrimmedString(255)), - UniqueConstraint("name"), -) - -HistoryTagAssociation_table = Table( - "history_tag_association", - metadata, - Column("history_id", Integer, ForeignKey("history.id"), index=True), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_tname", TrimmedString(255), index=True), - Column("value", TrimmedString(255), index=True), - Column("user_value", TrimmedString(255), index=True), -) - -DatasetTagAssociation_table = Table( - "dataset_tag_association", - metadata, - Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_tname", TrimmedString(255), index=True), - Column("value", TrimmedString(255), index=True), - Column("user_value", TrimmedString(255), index=True), -) - -HistoryDatasetAssociationTagAssociation_table = Table( - "history_dataset_association_tag_association", - metadata, - Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_tname", TrimmedString(255), index=True), - Column("value", TrimmedString(255), index=True), - Column("user_value", TrimmedString(255), index=True), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(Tag_table) - create_table(HistoryTagAssociation_table) - create_table(DatasetTagAssociation_table) - create_table(HistoryDatasetAssociationTagAssociation_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(HistoryDatasetAssociationTagAssociation_table) - drop_table(DatasetTagAssociation_table) - drop_table(HistoryTagAssociation_table) - drop_table(Tag_table) diff --git a/lib/galaxy/model/migrate/versions/0016_v0015_mysql_index_fix.py b/lib/galaxy/model/migrate/versions/0016_v0015_mysql_index_fix.py deleted file mode 100644 index ff5b0cfc9b84..000000000000 --- a/lib/galaxy/model/migrate/versions/0016_v0015_mysql_index_fix.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -This script was used to fix a problem introduced in 0015_tagging.py. MySQL has a -name length limit and thus the index "ix_hda_ta_history_dataset_association_id" -had to be manually created. - -This is now fixed in SQLAlchemy Migrate. 
-""" - -import logging - -from sqlalchemy import ( - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - add_index, - drop_index, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - HistoryDatasetAssociationTagAssociation_table = Table( - "history_dataset_association_tag_association", metadata, autoload=True - ) - if not any( - [_.name for _ in index.columns] == ["history_dataset_association_id"] - for index in HistoryDatasetAssociationTagAssociation_table.indexes - ): - add_index( - "ix_hda_ta_history_dataset_association_id", - HistoryDatasetAssociationTagAssociation_table, - "history_dataset_association_id", - ) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_index( - "ix_hda_ta_history_dataset_association_id", - "history_dataset_association_tag_association", - "history_dataset_association_id", - metadata, - ) diff --git a/lib/galaxy/model/migrate/versions/0017_library_item_indexes.py b/lib/galaxy/model/migrate/versions/0017_library_item_indexes.py deleted file mode 100644 index 7a29439fd97f..000000000000 --- a/lib/galaxy/model/migrate/versions/0017_library_item_indexes.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -This script adds 3 indexes to table columns: library_folder.name, -library_dataset.name, library_dataset_dataset_association.name. -""" - -import logging - -from sqlalchemy import MetaData - -from galaxy.model.migrate.versions.util import ( - add_index, - drop_index, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - add_index("ix_library_folder_name", "library_folder", "name", metadata) - add_index("ix_library_dataset_dataset_association_name", "library_dataset_dataset_association", "name", metadata) - add_index("ix_library_dataset_name", "library_dataset", "name", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_index("ix_library_dataset_name", "library_dataset", "name", metadata) - drop_index("ix_library_dataset_dataset_association_name", "library_dataset_dataset_association", "name", metadata) - drop_index("ix_library_folder_name", "library_folder", "name", metadata) diff --git a/lib/galaxy/model/migrate/versions/0018_ordered_tags_and_page_tags.py b/lib/galaxy/model/migrate/versions/0018_ordered_tags_and_page_tags.py deleted file mode 100644 index f64c84ff1b05..000000000000 --- a/lib/galaxy/model/migrate/versions/0018_ordered_tags_and_page_tags.py +++ /dev/null @@ -1,127 +0,0 @@ -""" -This migration script provides support for (a) ordering tags by recency and -(b) tagging pages. This script deletes all existing tags. 
-""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Index, - Integer, - MetaData, - Table, -) -from sqlalchemy.exc import OperationalError - -# Need our custom types, but don't import anything else from model -from galaxy.model.custom_types import TrimmedString - -log = logging.getLogger(__name__) -metadata = MetaData() - -HistoryTagAssociation_table = Table( - "history_tag_association", - metadata, - Column("id", Integer, primary_key=True), - Column("history_id", Integer, ForeignKey("history.id"), index=True), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_tname", TrimmedString(255), index=True), - Column("value", TrimmedString(255), index=True), - Column("user_value", TrimmedString(255), index=True), -) - -DatasetTagAssociation_table = Table( - "dataset_tag_association", - metadata, - Column("id", Integer, primary_key=True), - Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_tname", TrimmedString(255), index=True), - Column("value", TrimmedString(255), index=True), - Column("user_value", TrimmedString(255), index=True), -) - -HistoryDatasetAssociationTagAssociation_table = Table( - "history_dataset_association_tag_association", - metadata, - Column("id", Integer, primary_key=True), - Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_tname", TrimmedString(255), index=True), - Column("value", TrimmedString(255), index=True), - Column("user_value", TrimmedString(255), index=True), -) - -PageTagAssociation_table = Table( - "page_tag_association", - metadata, - Column("id", Integer, primary_key=True), - Column("page_id", Integer, ForeignKey("page.id"), index=True), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_tname", TrimmedString(255), index=True), - Column("value", TrimmedString(255), index=True), - Column("user_value", TrimmedString(255), index=True), -) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - # - # Recreate tables. - # - try: - HistoryTagAssociation_table.drop() - HistoryTagAssociation_table.create() - except Exception: - log.exception("Recreating history_tag_association table failed.") - - try: - DatasetTagAssociation_table.drop() - DatasetTagAssociation_table.create() - except Exception: - log.exception("Recreating dataset_tag_association table failed.") - - try: - HistoryDatasetAssociationTagAssociation_table.drop() - HistoryDatasetAssociationTagAssociation_table.create() - except OperationalError as e: - # Handle error that results from and index name that is too long; this occurs - # in MySQL. - if str(e).find("CREATE INDEX") != -1: - # Manually create index. - i = Index( - "ix_hda_ta_history_dataset_association_id", - HistoryDatasetAssociationTagAssociation_table.c.history_dataset_association_id, - ) - try: - i.create() - except Exception: - log.exception( - "Adding index 'ix_hda_ta_history_dataset_association_id' to table 'history_dataset_association_tag_association' table failed." - ) - except Exception: - log.exception("Recreating history_dataset_association_tag_association table failed.") - - # Create page_tag_association table. 
- try: - PageTagAssociation_table.create() - except Exception: - log.exception("Creating page_tag_association table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # No need to downgrade other tagging tables. They work fine with version 16 code. - - # Drop page_tag_association table. - try: - PageTagAssociation_table.drop() - except Exception: - log.exception("Dropping page_tag_association table failed.") diff --git a/lib/galaxy/model/migrate/versions/0019_request_library_folder.py b/lib/galaxy/model/migrate/versions/0019_request_library_folder.py deleted file mode 100644 index 98937561d62b..000000000000 --- a/lib/galaxy/model/migrate/versions/0019_request_library_folder.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -This script creates a request.folder_id column which is a foreign -key to the library_folder table. It also adds 'type' and 'layout' columns -to the form_definition table. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import ( - JSONType, - TrimmedString, -) - -from galaxy.model.migrate.versions.util import add_column - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Create the folder_id column - col = Column("folder_id", Integer, ForeignKey("library_folder.id"), index=True) - add_column(col, "request", metadata, index_name="ix_request_folder_id") - # Create the type column in form_definition - FormDefinition_table = Table("form_definition", metadata, autoload=True) - col = Column("type", TrimmedString(255), index=True) - add_column(col, FormDefinition_table, metadata, index_name="ix_form_definition_type") - col = Column("layout", JSONType) - add_column(col, FormDefinition_table, metadata) - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0020_library_upload_job.py b/lib/galaxy/model/migrate/versions/0020_library_upload_job.py deleted file mode 100644 index 340c3f602450..000000000000 --- a/lib/galaxy/model/migrate/versions/0020_library_upload_job.py +++ /dev/null @@ -1,71 +0,0 @@ -""" -This script creates a job_to_output_library_dataset table for allowing library -uploads to run as regular jobs. To support this, a library_folder_id column is -added to the job table, and library_folder/output_library_datasets relations -are added to the Job object. An index is also added to the dataset.state -column. 
-""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - String, - Table, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - add_index, - create_table, - drop_column, - drop_index, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -JobToOutputLibraryDataset_table = Table( - "job_to_output_library_dataset", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True), - Column("name", String(255)), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Create the job_to_output_library_dataset table - create_table(JobToOutputLibraryDataset_table) - - # Create the library_folder_id column - col = Column( - "library_folder_id", Integer, ForeignKey("library_folder.id", name="job_library_folder_id_fk"), index=True - ) - add_column(col, "job", metadata, index_name="ix_job_library_folder_id") - - # Create the ix_dataset_state index - add_index("ix_dataset_state", "dataset", "state", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop the ix_dataset_state index - drop_index("ix_dataset_state", "dataset", "state", metadata) - - # Drop the library_folder_id column - drop_column("library_folder_id", "job", metadata) - - # Drop the job_to_output_library_dataset table - drop_table(JobToOutputLibraryDataset_table) diff --git a/lib/galaxy/model/migrate/versions/0021_user_prefs.py b/lib/galaxy/model/migrate/versions/0021_user_prefs.py deleted file mode 100644 index 02b52b2d53f8..000000000000 --- a/lib/galaxy/model/migrate/versions/0021_user_prefs.py +++ /dev/null @@ -1,47 +0,0 @@ -""" -This migration script adds a user preferences table to Galaxy. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, - Unicode, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -# New table to support user preferences. -UserPreference_table = Table( - "user_preference", - metadata, - Column("id", Integer, primary_key=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("name", Unicode(255), index=True), - Column("value", Unicode(1024)), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(UserPreference_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(UserPreference_table) diff --git a/lib/galaxy/model/migrate/versions/0022_visualization_tables.py b/lib/galaxy/model/migrate/versions/0022_visualization_tables.py deleted file mode 100644 index 03b06635e6c4..000000000000 --- a/lib/galaxy/model/migrate/versions/0022_visualization_tables.py +++ /dev/null @@ -1,70 +0,0 @@ -""" -Migration script to add support for storing visualizations. 
- 1) Creates Visualization and VisualizationRevision tables -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -Visualization_table = Table( - "visualization", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column( - "latest_revision_id", - Integer, - ForeignKey("visualization_revision.id", use_alter=True, name="visualization_latest_revision_id_fk"), - index=True, - ), - Column("title", TEXT), - Column("type", TEXT), -) - -VisualizationRevision_table = Table( - "visualization_revision", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True, nullable=False), - Column("title", TEXT), - Column("config", TEXT), -) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - try: - Visualization_table.create() - except Exception: - log.exception("Could not create visualization table") - try: - VisualizationRevision_table.create() - except Exception: - log.exception("Could not create visualization_revision table") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - Visualization_table.drop() - VisualizationRevision_table.drop() diff --git a/lib/galaxy/model/migrate/versions/0023_page_published_and_deleted_columns.py b/lib/galaxy/model/migrate/versions/0023_page_published_and_deleted_columns.py deleted file mode 100644 index c2c09605e2d9..000000000000 --- a/lib/galaxy/model/migrate/versions/0023_page_published_and_deleted_columns.py +++ /dev/null @@ -1,42 +0,0 @@ -""" -Migration script to add columns for tracking whether pages are deleted and -publicly accessible. -""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, - Table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - - print(__doc__) - metadata.reflect() - - Page_table = Table("page", metadata, autoload=True) - - c = Column("published", Boolean, index=True, default=False) - c.create(Page_table, index_name="ix_page_published") - assert c is Page_table.c.published - - c = Column("deleted", Boolean, index=True, default=False) - c.create(Page_table, index_name="ix_page_deleted") - assert c is Page_table.c.deleted - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - Page_table = Table("page", metadata, autoload=True) - Page_table.c.published.drop() - Page_table.c.deleted.drop() diff --git a/lib/galaxy/model/migrate/versions/0024_page_slug_unique_constraint.py b/lib/galaxy/model/migrate/versions/0024_page_slug_unique_constraint.py deleted file mode 100644 index c69d563444ee..000000000000 --- a/lib/galaxy/model/migrate/versions/0024_page_slug_unique_constraint.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -Remove unique constraint from page slugs to allow creating a page with -the same slug as a deleted page. 
-""" - -import logging - -from sqlalchemy import ( - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - add_index, - drop_index, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - Page_table = Table("page", metadata, autoload=True) - try: - # Sqlite doesn't support .alter, so we need to drop an recreate - drop_index("ix_page_slug", Page_table, "slug") - - add_index("ix_page_slug", Page_table, "slug", unique=False) - except Exception: - # Mysql doesn't have a named index, but alter should work - Page_table.c.slug.alter(unique=False) - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0025_user_info.py b/lib/galaxy/model/migrate/versions/0025_user_info.py deleted file mode 100644 index 72c537cc9cd9..000000000000 --- a/lib/galaxy/model/migrate/versions/0025_user_info.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -This script adds a foreign key to the form_values table in the galaxy_user table -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - col = Column("form_values_id", Integer, ForeignKey("form_values.id", name="user_form_values_id_fk"), index=True) - add_column(col, "galaxy_user", metadata, index_name="ix_galaxy_user_form_values_id") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("form_values_id", "galaxy_user", metadata) diff --git a/lib/galaxy/model/migrate/versions/0026_cloud_tables.py b/lib/galaxy/model/migrate/versions/0026_cloud_tables.py deleted file mode 100644 index ee9440bf8877..000000000000 --- a/lib/galaxy/model/migrate/versions/0026_cloud_tables.py +++ /dev/null @@ -1,183 +0,0 @@ -""" -This script adds tables needed for Galaxy cloud functionality. 
-""" - -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -CloudImage_table = Table( - "cloud_image", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("provider_type", TEXT), - Column("image_id", TEXT, nullable=False), - Column("manifest", TEXT), - Column("state", TEXT), - Column("architecture", TEXT), - Column("deleted", Boolean, default=False), -) - -""" UserConfiguredInstance (UCI) table """ -UCI_table = Table( - "cloud_uci", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column("credentials_id", Integer, ForeignKey("cloud_user_credentials.id"), index=True), - Column("key_pair_name", TEXT), - Column("key_pair_material", TEXT), - Column("name", TEXT), - Column("state", TEXT), - Column("error", TEXT), - Column("total_size", Integer), - Column("launch_time", DateTime), - Column("deleted", Boolean, default=False), -) - -CloudInstance_table = Table( - "cloud_instance", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("launch_time", DateTime), - Column("stop_time", DateTime), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column("uci_id", Integer, ForeignKey("cloud_uci.id"), index=True), - Column("type", TEXT), - Column("reservation_id", TEXT), - Column("instance_id", TEXT), - Column("mi_id", Integer, ForeignKey("cloud_image.id"), index=True), - Column("state", TEXT), - Column("error", TEXT), - Column("public_dns", TEXT), - Column("private_dns", TEXT), - Column("security_group", TEXT), - Column("availability_zone", TEXT), -) - -CloudStore_table = Table( - "cloud_store", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("attach_time", DateTime), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column("uci_id", Integer, ForeignKey("cloud_uci.id"), index=True, nullable=False), - Column("volume_id", TEXT), - Column("size", Integer, nullable=False), - Column("availability_zone", TEXT), - Column("inst_id", Integer, ForeignKey("cloud_instance.id")), - Column("status", TEXT), - Column("device", TEXT), - Column("space_consumed", Integer), - Column("error", TEXT), - Column("deleted", Boolean, default=False), -) - -CloudSnapshot_table = Table( - "cloud_snapshot", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column("uci_id", Integer, ForeignKey("cloud_uci.id"), index=True), - Column("store_id", Integer, ForeignKey("cloud_store.id"), index=True, nullable=False), - Column("snapshot_id", TEXT), - Column("status", TEXT), - Column("description", TEXT), - Column("error", TEXT), 
- Column("deleted", Boolean, default=False), -) - -CloudUserCredentials_table = Table( - "cloud_user_credentials", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column("provider_id", Integer, ForeignKey("cloud_provider.id"), index=True, nullable=False), - Column("name", TEXT), - Column("access_key", TEXT), - Column("secret_key", TEXT), - Column("deleted", Boolean, default=False), -) - -CloudProvider_table = Table( - "cloud_provider", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column("type", TEXT, nullable=False), - Column("name", TEXT), - Column("region_connection", TEXT), - Column("region_name", TEXT), - Column("region_endpoint", TEXT), - Column("is_secure", Boolean), - Column("host", TEXT), - Column("port", Integer), - Column("proxy", TEXT), - Column("proxy_port", TEXT), - Column("proxy_user", TEXT), - Column("proxy_pass", TEXT), - Column("debug", Integer), - Column("https_connection_factory", TEXT), - Column("path", TEXT), - Column("deleted", Boolean, default=False), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(CloudProvider_table) - create_table(CloudUserCredentials_table) - create_table(CloudImage_table) - create_table(UCI_table) - create_table(CloudInstance_table) - create_table(CloudStore_table) - create_table(CloudSnapshot_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(CloudSnapshot_table) - drop_table(CloudStore_table) - drop_table(CloudInstance_table) - drop_table(UCI_table) - drop_table(CloudImage_table) - drop_table(CloudUserCredentials_table) - drop_table(CloudProvider_table) diff --git a/lib/galaxy/model/migrate/versions/0027_request_events.py b/lib/galaxy/model/migrate/versions/0027_request_events.py deleted file mode 100644 index 4cb033b64e5a..000000000000 --- a/lib/galaxy/model/migrate/versions/0027_request_events.py +++ /dev/null @@ -1,72 +0,0 @@ -""" -This migration script adds the request_event table and -removes the state field in the request table -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - create_table, - drop_column, - localtimestamp, - nextval, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -RequestEvent_table = Table( - "request_event", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("request_id", Integer, ForeignKey("request.id"), index=True), - Column("state", TrimmedString(255), index=True), - Column("comment", TEXT), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(RequestEvent_table) - # move the current state of all existing requests to the request_event table - cmd = ( - "INSERT INTO request_event " - + "SELECT %s AS id," - + "%s AS create_time," - 
+ "%s AS update_time," - + "request.id AS request_id," - + "request.state AS state," - + "'%s' AS comment " - + "FROM request;" - ) - cmd = cmd % ( - nextval(migrate_engine, "request_event"), - localtimestamp(migrate_engine), - localtimestamp(migrate_engine), - "Imported from request table", - ) - migrate_engine.execute(cmd) - - drop_column("state", "request", metadata) - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0028_external_metadata_file_override.py b/lib/galaxy/model/migrate/versions/0028_external_metadata_file_override.py deleted file mode 100644 index db6fcd0c7020..000000000000 --- a/lib/galaxy/model/migrate/versions/0028_external_metadata_file_override.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -This script adds the filename_override_metadata column to the JobExternalOutputMetadata table, -allowing existing metadata files to be written when using external metadata and a cluster -set up with read-only access to database/files -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - String, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - col = Column("filename_override_metadata", String(255)) - add_column(col, "job_external_output_metadata", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("filename_override_metadata", "job_external_output_metadata", metadata) diff --git a/lib/galaxy/model/migrate/versions/0029_user_actions.py b/lib/galaxy/model/migrate/versions/0029_user_actions.py deleted file mode 100644 index 4848fbd41a6f..000000000000 --- a/lib/galaxy/model/migrate/versions/0029_user_actions.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -This migration script adds a user actions table to Galaxy. -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - Unicode, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -# New table to store user actions. -UserAction_table = Table( - "user_action", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True), - Column("action", Unicode(255)), - Column("context", Unicode(512)), - Column("params", Unicode(1024)), -) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - try: - UserAction_table.create() - except Exception: - log.exception("Creating user_action table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - try: - UserAction_table.drop() - except Exception: - log.exception("Dropping user_action table failed.") diff --git a/lib/galaxy/model/migrate/versions/0030_history_slug_column.py b/lib/galaxy/model/migrate/versions/0030_history_slug_column.py deleted file mode 100644 index 09df4baeef9f..000000000000 --- a/lib/galaxy/model/migrate/versions/0030_history_slug_column.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Migration script to add column for a history slug. 
-""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, - TEXT, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - add_index, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - History_table = Table("history", metadata, autoload=True) - c = Column("slug", TEXT) - add_column(c, History_table, metadata) - # Index needs to be added separately because MySQL cannot index a TEXT/BLOB - # column without specifying mysql_length - add_index("ix_history_slug", History_table, "slug") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("slug", "history", metadata) diff --git a/lib/galaxy/model/migrate/versions/0031_community_and_workflow_tags.py b/lib/galaxy/model/migrate/versions/0031_community_and_workflow_tags.py deleted file mode 100644 index 03ed15d4348a..000000000000 --- a/lib/galaxy/model/migrate/versions/0031_community_and_workflow_tags.py +++ /dev/null @@ -1,113 +0,0 @@ -""" -Migration script to (a) add and populate necessary columns for doing community tagging of histories, datasets, and pages and \ -(b) add table for doing individual and community tagging of workflows. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, - Unicode, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -StoredWorkflowTagAssociation_table = Table( - "stored_workflow_tag_association", - metadata, - Column("id", Integer, primary_key=True), - Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("user_tname", Unicode(255), index=True), - Column("value", Unicode(255), index=True), - Column("user_value", Unicode(255), index=True), -) - -WorkflowTagAssociation_table = Table( - "workflow_tag_association", - metadata, - Column("id", Integer, primary_key=True), - Column("workflow_id", Integer, ForeignKey("workflow.id"), index=True), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("user_tname", Unicode(255), index=True), - Column("value", Unicode(255), index=True), - Column("user_value", Unicode(255), index=True), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Create user_id column in history_tag_association table. - c = Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True) - add_column(c, "history_tag_association", metadata, index_name="ix_history_tag_association_user_id") - - # Populate column so that user_id is the id of the user who owns the history (and, up to now, was the only person able to tag the history). - migrate_engine.execute( - "UPDATE history_tag_association SET user_id=( SELECT user_id FROM history WHERE history_tag_association.history_id = history.id )" - ) - - # Create user_id column in history_dataset_association_tag_association table. 
- c = Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True) - add_column( - c, - "history_dataset_association_tag_association", - metadata, - index_name="ix_history_dataset_association_tag_association_user_id", - ) - - # Populate column so that user_id is the id of the user who owns the history_dataset_association (and, up to now, was the only person able to tag the page). - migrate_engine.execute( - "UPDATE history_dataset_association_tag_association SET user_id=( SELECT history.user_id FROM history, history_dataset_association WHERE history_dataset_association.history_id = history.id AND history_dataset_association.id = history_dataset_association_tag_association.history_dataset_association_id)" - ) - - # Create user_id column in page_tag_association table. - c = Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True) - add_column(c, "page_tag_association", metadata, index_name="ix_page_tag_association_user_id") - - # Populate column so that user_id is the id of the user who owns the page (and, up to now, was the only person able to tag the page). - migrate_engine.execute( - "UPDATE page_tag_association SET user_id=( SELECT user_id FROM page WHERE page_tag_association.page_id = page.id )" - ) - - # Create stored_workflow_tag_association table. - create_table(StoredWorkflowTagAssociation_table) - - # Create workflow_tag_association table. - create_table(WorkflowTagAssociation_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop workflow_tag_association table. - drop_table(WorkflowTagAssociation_table) - - # Drop stored_workflow_tag_association table. - drop_table(StoredWorkflowTagAssociation_table) - - # Drop user_id column from page_tag_association table. - drop_column("user_id", "page_tag_association", metadata) - - # Drop user_id column from history_dataset_association_tag_association table. - drop_column("user_id", "history_dataset_association_tag_association", metadata) - - # Drop user_id column from history_tag_association table. - drop_column("user_id", "history_tag_association", metadata) diff --git a/lib/galaxy/model/migrate/versions/0032_stored_workflow_slug_column.py b/lib/galaxy/model/migrate/versions/0032_stored_workflow_slug_column.py deleted file mode 100644 index 579bd9bbf480..000000000000 --- a/lib/galaxy/model/migrate/versions/0032_stored_workflow_slug_column.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Migration script to add slug column for stored workflow. 
-""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, - TEXT, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - add_index, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - StoredWorkflow_table = Table("stored_workflow", metadata, autoload=True) - c = Column("slug", TEXT) - add_column(c, StoredWorkflow_table, metadata) - # Index needs to be added separately because MySQL cannot index a TEXT/BLOB - # column without specifying mysql_length - add_index("ix_stored_workflow_slug", StoredWorkflow_table, "slug") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("slug", "stored_workflow", metadata) diff --git a/lib/galaxy/model/migrate/versions/0033_published_cols_for_histories_and_workflows.py b/lib/galaxy/model/migrate/versions/0033_published_cols_for_histories_and_workflows.py deleted file mode 100644 index 3e965229466b..000000000000 --- a/lib/galaxy/model/migrate/versions/0033_published_cols_for_histories_and_workflows.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -Migration script to add necessary columns for distinguishing between viewing/importing and publishing histories, \ -workflows, and pages. Script adds published column to histories and workflows and importable column to pages. -""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - Index, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Create published column in history table. - History_table = Table("history", metadata, autoload=True) - c = Column("published", Boolean, index=True) - add_column(c, History_table, metadata, index_name="ix_history_published") - if migrate_engine.name != "sqlite": - # Create index for published column in history table. - try: - i = Index("ix_history_published", History_table.c.published) - i.create() - except Exception: - # Mysql doesn't have a named index, but alter should work - History_table.c.published.alter(unique=False) - - # Create published column in stored workflows table. - StoredWorkflow_table = Table("stored_workflow", metadata, autoload=True) - c = Column("published", Boolean, index=True) - add_column(c, StoredWorkflow_table, metadata, index_name="ix_stored_workflow_published") - if migrate_engine.name != "sqlite": - # Create index for published column in stored workflows table. - try: - i = Index("ix_stored_workflow_published", StoredWorkflow_table.c.published) - i.create() - except Exception: - # Mysql doesn't have a named index, but alter should work - StoredWorkflow_table.c.published.alter(unique=False) - - # Create importable column in page table. - Page_table = Table("page", metadata, autoload=True) - c = Column("importable", Boolean, index=True) - add_column(c, Page_table, metadata, index_name="ix_page_importable") - if migrate_engine.name != "sqlite": - # Create index for importable column in page table. 
- try: - i = Index("ix_page_importable", Page_table.c.importable) - i.create() - except Exception: - # Mysql doesn't have a named index, but alter should work - Page_table.c.importable.alter(unique=False) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("published", "history", metadata) - drop_column("published", "stored_workflow", metadata) - drop_column("importable", "page", metadata) diff --git a/lib/galaxy/model/migrate/versions/0034_page_user_share_association.py b/lib/galaxy/model/migrate/versions/0034_page_user_share_association.py deleted file mode 100644 index fa8b3fb3c079..000000000000 --- a/lib/galaxy/model/migrate/versions/0034_page_user_share_association.py +++ /dev/null @@ -1,47 +0,0 @@ -""" -Migration script to create a table for page-user share association. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -PageUserShareAssociation_table = Table( - "page_user_share_association", - metadata, - Column("id", Integer, primary_key=True), - Column("page_id", Integer, ForeignKey("page.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), -) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - # Create stored_workflow_tag_association table. - try: - PageUserShareAssociation_table.create() - except Exception: - log.exception("Creating page_user_share_association table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop workflow_tag_association table. - try: - PageUserShareAssociation_table.drop() - except Exception: - log.exception("Dropping page_user_share_association table failed.") diff --git a/lib/galaxy/model/migrate/versions/0035_item_annotations_and_workflow_step_tags.py b/lib/galaxy/model/migrate/versions/0035_item_annotations_and_workflow_step_tags.py deleted file mode 100644 index 35b6fe37214e..000000000000 --- a/lib/galaxy/model/migrate/versions/0035_item_annotations_and_workflow_step_tags.py +++ /dev/null @@ -1,105 +0,0 @@ -""" -Migration script to (a) create tables for annotating objects and (b) create tags for workflow steps. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Index, - Integer, - MetaData, - Table, - TEXT, - Unicode, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -# Annotation tables. 
- -HistoryAnnotationAssociation_table = Table( - "history_annotation_association", - metadata, - Column("id", Integer, primary_key=True), - Column("history_id", Integer, ForeignKey("history.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("annotation", TEXT), - Index("ix_history_anno_assoc_annotation", "annotation", mysql_length=200), -) - -HistoryDatasetAssociationAnnotationAssociation_table = Table( - "history_dataset_association_annotation_association", - metadata, - Column("id", Integer, primary_key=True), - Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("annotation", TEXT), - Index("ix_history_dataset_anno_assoc_annotation", "annotation", mysql_length=200), -) - -StoredWorkflowAnnotationAssociation_table = Table( - "stored_workflow_annotation_association", - metadata, - Column("id", Integer, primary_key=True), - Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("annotation", TEXT), - Index("ix_stored_workflow_ann_assoc_annotation", "annotation", mysql_length=200), -) - -WorkflowStepAnnotationAssociation_table = Table( - "workflow_step_annotation_association", - metadata, - Column("id", Integer, primary_key=True), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("annotation", TEXT), - Index("ix_workflow_step_ann_assoc_annotation", "annotation", mysql_length=200), -) - -# Tagging tables. - -WorkflowStepTagAssociation_table = Table( - "workflow_step_tag_association", - metadata, - Column("id", Integer, primary_key=True), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("user_tname", Unicode(255), index=True), - Column("value", Unicode(255), index=True), - Column("user_value", Unicode(255), index=True), -) - -TABLES = [ - HistoryAnnotationAssociation_table, - HistoryDatasetAssociationAnnotationAssociation_table, - StoredWorkflowAnnotationAssociation_table, - WorkflowStepAnnotationAssociation_table, - WorkflowStepTagAssociation_table, -] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for table in TABLES: - create_table(table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - for table in TABLES: - drop_table(table) diff --git a/lib/galaxy/model/migrate/versions/0036_add_deleted_column_to_library_template_assoc_tables.py b/lib/galaxy/model/migrate/versions/0036_add_deleted_column_to_library_template_assoc_tables.py deleted file mode 100644 index 7fc398a4ea2c..000000000000 --- a/lib/galaxy/model/migrate/versions/0036_add_deleted_column_to_library_template_assoc_tables.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -Migration script to add a deleted column to the following tables: -library_info_association, library_folder_info_association, library_dataset_dataset_info_association. 
-""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import engine_false - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - try: - LibraryInfoAssociation_table = Table("library_info_association", metadata, autoload=True) - c = Column("deleted", Boolean, index=True, default=False) - c.create(LibraryInfoAssociation_table, index_name="ix_library_info_association_deleted") - assert c is LibraryInfoAssociation_table.c.deleted - except Exception: - log.exception("Adding column 'deleted' to 'library_info_association' table failed.") - cmd = f"UPDATE library_info_association SET deleted = {engine_false(migrate_engine)}" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("deleted to false in library_info_association failed.") - try: - LibraryFolderInfoAssociation_table = Table("library_folder_info_association", metadata, autoload=True) - c = Column("deleted", Boolean, index=True, default=False) - c.create(LibraryFolderInfoAssociation_table, index_name="ix_library_folder_info_association_deleted") - assert c is LibraryFolderInfoAssociation_table.c.deleted - except Exception: - log.exception("Adding column 'deleted' to 'library_folder_info_association' table failed.") - cmd = f"UPDATE library_folder_info_association SET deleted = {engine_false(migrate_engine)}" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("deleted to false in library_folder_info_association failed.") - try: - LibraryDatasetDatasetInfoAssociation_table = Table( - "library_dataset_dataset_info_association", metadata, autoload=True - ) - c = Column("deleted", Boolean, index=True, default=False) - c.create( - LibraryDatasetDatasetInfoAssociation_table, index_name="ix_library_dataset_dataset_info_association_deleted" - ) - assert c is LibraryDatasetDatasetInfoAssociation_table.c.deleted - except Exception: - log.exception("Adding column 'deleted' to 'library_dataset_dataset_info_association' table failed.") - cmd = f"UPDATE library_dataset_dataset_info_association SET deleted = {engine_false(migrate_engine)}" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("deleted to false in library_dataset_dataset_info_association failed.") - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0037_samples_library.py b/lib/galaxy/model/migrate/versions/0037_samples_library.py deleted file mode 100644 index ec536e88e2f5..000000000000 --- a/lib/galaxy/model/migrate/versions/0037_samples_library.py +++ /dev/null @@ -1,78 +0,0 @@ -""" -This migration script removes the library_id & folder_id fields in the 'request' table and -adds the same to the 'sample' table. This also adds a 'datatx' column to request_type table -to store the sequencer login information. Finally, this adds a 'dataset_files' column to -the sample table. 
-""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Add the datatx_info column in 'request_type' table - col = Column("datatx_info", JSONType) - add_column(col, "request_type", metadata) - - # Delete the library_id column in 'request' table - Request_table = Table("request", metadata, autoload=True) - # TODO: Dropping a column used in a foreign key fails in MySQL, need to remove the FK first. - drop_column("library_id", Request_table) - - # Delete the folder_id column in 'request' table - # TODO: Dropping a column used in a foreign key fails in MySQL, need to remove the FK first. - drop_column("folder_id", Request_table) - - # Add the dataset_files column in 'sample' table - Sample_table = Table("sample", metadata, autoload=True) - col = Column("dataset_files", JSONType) - add_column(col, Sample_table, metadata) - - # Add the library_id column in 'sample' table - col = Column("library_id", Integer, ForeignKey("library.id"), index=True) - add_column(col, Sample_table, metadata, index_name="ix_sample_library_id") - - # Add the library_id column in 'sample' table - col = Column("folder_id", Integer, ForeignKey("library_folder.id"), index=True) - add_column(col, Sample_table, metadata, index_name="ix_sample_library_folder_id") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - Sample_table = Table("sample", metadata, autoload=True) - drop_column("folder_id", Sample_table) - drop_column("library_id", Sample_table) - drop_column("dataset_files", Sample_table) - - Request_table = Table("request", metadata, autoload=True) - col = Column("folder_id", Integer, ForeignKey("library_folder.id"), index=True) - add_column(col, Request_table, metadata, index_name="ix_request_folder_id") - - col = Column("library_id", Integer, ForeignKey("library.id"), index=True) - add_column(col, Request_table, metadata, index_name="ix_request_library_id") - - drop_column("datatx_info", "request_type", metadata) diff --git a/lib/galaxy/model/migrate/versions/0038_add_inheritable_column_to_library_template_assoc_tables.py b/lib/galaxy/model/migrate/versions/0038_add_inheritable_column_to_library_template_assoc_tables.py deleted file mode 100644 index 1e0d4ada2b55..000000000000 --- a/lib/galaxy/model/migrate/versions/0038_add_inheritable_column_to_library_template_assoc_tables.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Migration script to add an inheritable column to the following tables: -library_info_association, library_folder_info_association. -Also, in case of sqlite check if the previous migration script deleted the -request table and if so, restore the table. 
-""" - -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import engine_false - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - - # In case of sqlite, check if the previous migration script deleted the - # request table and if so, restore the table. - if migrate_engine.name == "sqlite": - if not migrate_engine.has_table("request"): - # load the tables referenced in foreign keys - metadata.reflect(only=["form_values", "request_type", "galaxy_user"]) - # create a temporary table - Request_table = Table( - "request", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("name", TrimmedString(255), nullable=False), - Column("desc", TEXT), - Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True), - Column("request_type_id", Integer, ForeignKey("request_type.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("deleted", Boolean, index=True, default=False), - ) - try: - Request_table.create() - except Exception: - log.exception("Creating request table failed.") - - metadata.reflect() - try: - LibraryInfoAssociation_table = Table("library_info_association", metadata, autoload=True) - c = Column("inheritable", Boolean, index=True, default=False) - c.create(LibraryInfoAssociation_table, index_name="ix_library_info_association_inheritable") - assert c is LibraryInfoAssociation_table.c.inheritable - except Exception: - log.exception("Adding column 'inheritable' to 'library_info_association' table failed.") - cmd = f"UPDATE library_info_association SET inheritable = {engine_false(migrate_engine)}" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("Setting value of column inheritable to false in library_info_association failed.") - try: - LibraryFolderInfoAssociation_table = Table("library_folder_info_association", metadata, autoload=True) - c = Column("inheritable", Boolean, index=True, default=False) - c.create(LibraryFolderInfoAssociation_table, index_name="ix_library_folder_info_association_inheritable") - assert c is LibraryFolderInfoAssociation_table.c.inheritable - except Exception: - log.exception("Adding column 'inheritable' to 'library_folder_info_association' table failed.") - cmd = f"UPDATE library_folder_info_association SET inheritable = {engine_false(migrate_engine)}" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("Setting value of column inheritable to false in library_folder_info_association failed.") - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0039_add_synopsis_column_to_library_table.py b/lib/galaxy/model/migrate/versions/0039_add_synopsis_column_to_library_table.py deleted file mode 100644 index 469987c29db2..000000000000 --- a/lib/galaxy/model/migrate/versions/0039_add_synopsis_column_to_library_table.py +++ /dev/null @@ -1,38 +0,0 @@ -""" -Migration script to add a synopsis column to the library table. 
-""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, - TEXT, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - try: - Library_table = Table("library", metadata, autoload=True) - c = Column("synopsis", TEXT) - c.create(Library_table) - assert c is Library_table.c.synopsis - except Exception: - log.exception("Adding column 'synopsis' to 'library' table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - try: - Library_table = Table("library", metadata, autoload=True) - Library_table.c.synopsis.drop() - except Exception: - log.exception("Dropping column 'synopsis' from 'library' table failed") diff --git a/lib/galaxy/model/migrate/versions/0040_page_annotations.py b/lib/galaxy/model/migrate/versions/0040_page_annotations.py deleted file mode 100644 index 35b12b27a3c0..000000000000 --- a/lib/galaxy/model/migrate/versions/0040_page_annotations.py +++ /dev/null @@ -1,48 +0,0 @@ -""" -Migration script to (a) create tables for annotating pages. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Index, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -PageAnnotationAssociation_table = Table( - "page_annotation_association", - metadata, - Column("id", Integer, primary_key=True), - Column("page_id", Integer, ForeignKey("page.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("annotation", TEXT), - Index("ix_page_annotation_association_annotation", "annotation", mysql_length=200), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(PageAnnotationAssociation_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(PageAnnotationAssociation_table) diff --git a/lib/galaxy/model/migrate/versions/0041_workflow_invocation.py b/lib/galaxy/model/migrate/versions/0041_workflow_invocation.py deleted file mode 100644 index a30d56d53753..000000000000 --- a/lib/galaxy/model/migrate/versions/0041_workflow_invocation.py +++ /dev/null @@ -1,63 +0,0 @@ -""" -Migration script to create tables for tracking workflow invocations. 
-""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -WorkflowInvocation_table = Table( - "workflow_invocation", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("workflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=False), -) - -WorkflowInvocationStep_table = Table( - "workflow_invocation_step", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True, nullable=False), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False), - Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=False), -) - -tables = [WorkflowInvocation_table, WorkflowInvocationStep_table] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for table in tables: - create_table(table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - for table in reversed(tables): - drop_table(table) diff --git a/lib/galaxy/model/migrate/versions/0042_workflow_invocation_fix.py b/lib/galaxy/model/migrate/versions/0042_workflow_invocation_fix.py deleted file mode 100644 index 0164a00746c5..000000000000 --- a/lib/galaxy/model/migrate/versions/0042_workflow_invocation_fix.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Drop and readd workflow invocation tables, allowing null jobs -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # 1) Drop - for table_name in ["workflow_invocation_step", "workflow_invocation"]: - t = Table(table_name, metadata, autoload=True) - drop_table(t) - metadata.remove(t) - - # 2) Re-add - WorkflowInvocation_table = Table( - "workflow_invocation", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("workflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=False), - ) - - WorkflowInvocationStep_table = Table( - "workflow_invocation_step", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True, nullable=False), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False), - Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=True), - ) - - for table in [WorkflowInvocation_table, WorkflowInvocationStep_table]: - create_table(table) - - -def downgrade(migrate_engine): - pass diff --git 
a/lib/galaxy/model/migrate/versions/0043_visualization_sharing_tagging_annotating.py b/lib/galaxy/model/migrate/versions/0043_visualization_sharing_tagging_annotating.py deleted file mode 100644 index 833371fb2696..000000000000 --- a/lib/galaxy/model/migrate/versions/0043_visualization_sharing_tagging_annotating.py +++ /dev/null @@ -1,129 +0,0 @@ -""" -Migration script to create tables and columns for sharing visualizations. -""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - ForeignKey, - Index, - Integer, - MetaData, - Table, - TEXT, - Unicode, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - add_index, - create_table, - drop_column, - drop_table, - engine_false, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -# Sharing visualizations. - -VisualizationUserShareAssociation_table = Table( - "visualization_user_share_association", - metadata, - Column("id", Integer, primary_key=True), - Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), -) - -# Tagging visualizations. - -VisualizationTagAssociation_table = Table( - "visualization_tag_association", - metadata, - Column("id", Integer, primary_key=True), - Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("user_tname", Unicode(255), index=True), - Column("value", Unicode(255), index=True), - Column("user_value", Unicode(255), index=True), -) - -# Annotating visualizations. - -VisualizationAnnotationAssociation_table = Table( - "visualization_annotation_association", - metadata, - Column("id", Integer, primary_key=True), - Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("annotation", TEXT), - Index("ix_visualization_annotation_association_annotation", "annotation", mysql_length=200), -) - -TABLES = [ - VisualizationUserShareAssociation_table, - VisualizationTagAssociation_table, - VisualizationAnnotationAssociation_table, -] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for table in TABLES: - create_table(table) - - # Add columns & create indices for supporting sharing to visualization table. - Visualization_table = Table("visualization", metadata, autoload=True) - deleted_column = Column("deleted", Boolean, default=False, index=True) - add_column(deleted_column, Visualization_table, metadata, index_name="ix_visualization_deleted") - try: - # Fill column with default value. - cmd = f"UPDATE visualization SET deleted = {engine_false(migrate_engine)}" - migrate_engine.execute(cmd) - except Exception: - log.exception("Updating column 'deleted' of table 'visualization' failed.") - - importable_column = Column("importable", Boolean, default=False, index=True) - add_column(importable_column, Visualization_table, metadata, index_name="ix_visualization_importable") - try: - # Fill column with default value. 
- cmd = f"UPDATE visualization SET importable = {engine_false(migrate_engine)}" - migrate_engine.execute(cmd) - except Exception: - log.exception("Updating column 'importable' of table 'visualization' failed.") - - slug_column = Column("slug", TEXT) - add_column(slug_column, Visualization_table, metadata) - # Index needs to be added separately because MySQL cannot index a TEXT/BLOB - # column without specifying mysql_length - add_index("ix_visualization_slug", Visualization_table, "slug") - - published_column = Column("published", Boolean, index=True) - add_column(published_column, Visualization_table, metadata, index_name="ix_visualization_published") - try: - # Fill column with default value. - cmd = f"UPDATE visualization SET published = {engine_false(migrate_engine)}" - migrate_engine.execute(cmd) - except Exception: - log.exception("Updating column 'published' of table 'visualization' failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - Visualization_table = Table("visualization", metadata, autoload=True) - drop_column("deleted", Visualization_table) - drop_column("importable", Visualization_table) - drop_column("slug", Visualization_table) - drop_column("published", Visualization_table) - - for table in TABLES: - drop_table(table) diff --git a/lib/galaxy/model/migrate/versions/0044_add_notify_column_to_request_table.py b/lib/galaxy/model/migrate/versions/0044_add_notify_column_to_request_table.py deleted file mode 100644 index f9257ec9e8fc..000000000000 --- a/lib/galaxy/model/migrate/versions/0044_add_notify_column_to_request_table.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Migration script to add a notify column to the request table. -""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - c = Column("notify", Boolean, default=False) - add_column(c, "request", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("notify", "request", metadata) diff --git a/lib/galaxy/model/migrate/versions/0045_request_type_permissions_table.py b/lib/galaxy/model/migrate/versions/0045_request_type_permissions_table.py deleted file mode 100644 index e32db165e51a..000000000000 --- a/lib/galaxy/model/migrate/versions/0045_request_type_permissions_table.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -Migration script to add the request_type_permissions table. 
-""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -RequestTypePermissions_table = Table( - "request_type_permissions", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("action", TEXT), - Column("request_type_id", Integer, ForeignKey("request_type.id"), nullable=True, index=True), - Column("role_id", Integer, ForeignKey("role.id"), index=True), -) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - try: - RequestTypePermissions_table.create() - except Exception: - log.exception("Creating request_type_permissions table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - try: - RequestTypePermissions_table = Table("request_type_permissions", metadata, autoload=True) - RequestTypePermissions_table.drop() - except Exception: - log.exception("Dropping 'request_type_permissions' table failed.") diff --git a/lib/galaxy/model/migrate/versions/0046_post_job_actions.py b/lib/galaxy/model/migrate/versions/0046_post_job_actions.py deleted file mode 100644 index c2dc6fa0b4e2..000000000000 --- a/lib/galaxy/model/migrate/versions/0046_post_job_actions.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -Migration script to create tables for handling post-job actions. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - String, - Table, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -PostJobAction_table = Table( - "post_job_action", - metadata, - Column("id", Integer, primary_key=True), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False), - Column("action_type", String(255), nullable=False), - Column("output_name", String(255), nullable=True), - Column("action_arguments", JSONType, nullable=True), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - create_table(PostJobAction_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - drop_table(PostJobAction_table) diff --git a/lib/galaxy/model/migrate/versions/0047_job_table_user_id_column.py b/lib/galaxy/model/migrate/versions/0047_job_table_user_id_column.py deleted file mode 100644 index b24b89dbc60a..000000000000 --- a/lib/galaxy/model/migrate/versions/0047_job_table_user_id_column.py +++ /dev/null @@ -1,61 +0,0 @@ -""" -Add a user_id column to the job table. 
-""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - Job_table = Table("job", metadata, autoload=True) - col = Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True) - add_column(col, Job_table, metadata, index_name="ix_job_user_id") - try: - cmd = ( - "SELECT job.id AS galaxy_job_id, " - + "galaxy_session.user_id AS galaxy_user_id " - + "FROM job " - + "JOIN galaxy_session ON job.session_id = galaxy_session.id;" - ) - job_users = migrate_engine.execute(cmd).fetchall() - print("Updating user_id column in job table for ", len(job_users), " rows...") - print("") - update_count = 0 - for row in job_users: - if row.galaxy_user_id: - cmd = "UPDATE job SET user_id = %d WHERE id = %d" % (int(row.galaxy_user_id), int(row.galaxy_job_id)) - update_count += 1 - migrate_engine.execute(cmd) - print("Updated column 'user_id' for ", update_count, " rows of table 'job'.") - print( - len(job_users) - update_count, " rows have no user_id since the value was NULL in the galaxy_session table." - ) - print("") - except Exception: - log.exception("Updating column 'user_id' of table 'job' failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("user_id", "job", metadata) diff --git a/lib/galaxy/model/migrate/versions/0048_dataset_instance_state_column.py b/lib/galaxy/model/migrate/versions/0048_dataset_instance_state_column.py deleted file mode 100644 index 5187ce14bc49..000000000000 --- a/lib/galaxy/model/migrate/versions/0048_dataset_instance_state_column.py +++ /dev/null @@ -1,40 +0,0 @@ -""" -Add a state column to the history_dataset_association and library_dataset_dataset_association table. -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -DATASET_INSTANCE_TABLE_NAMES = ["history_dataset_association", "library_dataset_dataset_association"] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for table_name in DATASET_INSTANCE_TABLE_NAMES: - col = Column("state", TrimmedString(64), index=True, nullable=True) - index_name = f"ix_{table_name}_state" - add_column(col, table_name, metadata, index_name=index_name) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - for table_name in DATASET_INSTANCE_TABLE_NAMES: - drop_column("state", table_name, metadata) diff --git a/lib/galaxy/model/migrate/versions/0049_api_keys_table.py b/lib/galaxy/model/migrate/versions/0049_api_keys_table.py deleted file mode 100644 index 5bde68d9201b..000000000000 --- a/lib/galaxy/model/migrate/versions/0049_api_keys_table.py +++ /dev/null @@ -1,50 +0,0 @@ -""" -Migration script to add the api_keys table. 
-""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import TrimmedString - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -APIKeys_table = Table( - "api_keys", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("key", TrimmedString(32), index=True, unique=True), -) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - try: - APIKeys_table.create() - except Exception: - log.exception("Creating api_keys table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - # Load existing tables - metadata.reflect() - try: - APIKeys_table.drop() - except Exception: - log.exception("Dropping api_keys table failed.") diff --git a/lib/galaxy/model/migrate/versions/0050_drop_cloud_tables.py b/lib/galaxy/model/migrate/versions/0050_drop_cloud_tables.py deleted file mode 100644 index 3b7abf2ebfd5..000000000000 --- a/lib/galaxy/model/migrate/versions/0050_drop_cloud_tables.py +++ /dev/null @@ -1,183 +0,0 @@ -""" -This script drops tables that were associated with the old Galaxy Cloud functionality. -""" - -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -CloudImage_table = Table( - "cloud_image", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("provider_type", TEXT), - Column("image_id", TEXT, nullable=False), - Column("manifest", TEXT), - Column("state", TEXT), - Column("architecture", TEXT), - Column("deleted", Boolean, default=False), -) - -""" UserConfiguredInstance (UCI) table """ -UCI_table = Table( - "cloud_uci", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column("credentials_id", Integer, ForeignKey("cloud_user_credentials.id"), index=True), - Column("key_pair_name", TEXT), - Column("key_pair_material", TEXT), - Column("name", TEXT), - Column("state", TEXT), - Column("error", TEXT), - Column("total_size", Integer), - Column("launch_time", DateTime), - Column("deleted", Boolean, default=False), -) - -CloudInstance_table = Table( - "cloud_instance", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("launch_time", DateTime), - Column("stop_time", DateTime), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column("uci_id", Integer, ForeignKey("cloud_uci.id"), index=True), - Column("type", TEXT), - Column("reservation_id", TEXT), - Column("instance_id", TEXT), - Column("mi_id", Integer, ForeignKey("cloud_image.id"), index=True), - Column("state", TEXT), - Column("error", TEXT), - Column("public_dns", TEXT), - Column("private_dns", TEXT), - Column("security_group", 
TEXT), - Column("availability_zone", TEXT), -) - -CloudStore_table = Table( - "cloud_store", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("attach_time", DateTime), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column("uci_id", Integer, ForeignKey("cloud_uci.id"), index=True, nullable=False), - Column("volume_id", TEXT), - Column("size", Integer, nullable=False), - Column("availability_zone", TEXT), - Column("inst_id", Integer, ForeignKey("cloud_instance.id")), - Column("status", TEXT), - Column("device", TEXT), - Column("space_consumed", Integer), - Column("error", TEXT), - Column("deleted", Boolean, default=False), -) - -CloudSnapshot_table = Table( - "cloud_snapshot", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column("uci_id", Integer, ForeignKey("cloud_uci.id"), index=True), - Column("store_id", Integer, ForeignKey("cloud_store.id"), index=True, nullable=False), - Column("snapshot_id", TEXT), - Column("status", TEXT), - Column("description", TEXT), - Column("error", TEXT), - Column("deleted", Boolean, default=False), -) - -CloudUserCredentials_table = Table( - "cloud_user_credentials", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column("provider_id", Integer, ForeignKey("cloud_provider.id"), index=True, nullable=False), - Column("name", TEXT), - Column("access_key", TEXT), - Column("secret_key", TEXT), - Column("deleted", Boolean, default=False), -) - -CloudProvider_table = Table( - "cloud_provider", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False), - Column("type", TEXT, nullable=False), - Column("name", TEXT), - Column("region_connection", TEXT), - Column("region_name", TEXT), - Column("region_endpoint", TEXT), - Column("is_secure", Boolean), - Column("host", TEXT), - Column("port", Integer), - Column("proxy", TEXT), - Column("proxy_port", TEXT), - Column("proxy_user", TEXT), - Column("proxy_pass", TEXT), - Column("debug", Integer), - Column("https_connection_factory", TEXT), - Column("path", TEXT), - Column("deleted", Boolean, default=False), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(CloudSnapshot_table) - drop_table(CloudStore_table) - drop_table(CloudInstance_table) - drop_table(UCI_table) - drop_table(CloudImage_table) - drop_table(CloudUserCredentials_table) - drop_table(CloudProvider_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - create_table(CloudProvider_table) - create_table(CloudUserCredentials_table) - create_table(CloudImage_table) - create_table(UCI_table) - create_table(CloudInstance_table) - create_table(CloudStore_table) - create_table(CloudSnapshot_table) diff --git a/lib/galaxy/model/migrate/versions/0051_imported_col_for_jobs_table.py 
b/lib/galaxy/model/migrate/versions/0051_imported_col_for_jobs_table.py deleted file mode 100644 index 410e78d2dca8..000000000000 --- a/lib/galaxy/model/migrate/versions/0051_imported_col_for_jobs_table.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -Migration script to add imported column for jobs table. -""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, - engine_false, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Create and initialize imported column in job table. - Jobs_table = Table("job", metadata, autoload=True) - c = Column("imported", Boolean, default=False, index=True) - add_column(c, Jobs_table, metadata, index_name="ix_job_imported") - try: - migrate_engine.execute(f"UPDATE job SET imported={engine_false(migrate_engine)}") - except Exception: - log.exception("Updating column 'imported' of table 'job' failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("imported", "job", metadata) diff --git a/lib/galaxy/model/migrate/versions/0052_sample_dataset_table.py b/lib/galaxy/model/migrate/versions/0052_sample_dataset_table.py deleted file mode 100644 index c597856ad91d..000000000000 --- a/lib/galaxy/model/migrate/versions/0052_sample_dataset_table.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -Migration script to add the sample_dataset table and remove the 'dataset_files' column -from the 'sample' table -""" - -import datetime -import logging -from json import loads - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) -from sqlalchemy.exc import NoSuchTableError - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - localtimestamp, - nextval, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - - -SampleDataset_table = Table( - "sample_dataset", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("sample_id", Integer, ForeignKey("sample.id"), index=True), - Column("name", TrimmedString(255), nullable=False), - Column("file_path", TrimmedString(255), nullable=False), - Column("status", TrimmedString(255), nullable=False), - Column("error_msg", TEXT), - Column("size", TrimmedString(255)), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - try: - SampleDataset_table.create() - except Exception: - log.exception("Creating sample_dataset table failed.") - - cmd = "SELECT id, dataset_files FROM sample" - result = migrate_engine.execute(cmd) - for r in result: - sample_id = r[0] - if r[1]: - dataset_files = loads(r[1]) - for df in dataset_files: - if isinstance(df, dict): - cmd = "INSERT INTO sample_dataset VALUES (%s, %s, %s, %s, '%s', '%s', '%s', '%s', '%s')" - cmd = cmd % ( - nextval(migrate_engine, "sample_dataset"), - localtimestamp(migrate_engine), - localtimestamp(migrate_engine), - str(sample_id), - df.get("name", ""), - df.get("filepath", ""), - df.get("status", "").replace('"', "").replace("'", ""), - "", - df.get("size", "") - .replace('"', "") - .replace("'", "") - .replace(df.get("filepath", ""), "") - .strip(), - ) - migrate_engine.execute(cmd) - - # 
Delete the dataset_files column in the Sample table - try: - Sample_table = Table("sample", metadata, autoload=True) - except NoSuchTableError: - Sample_table = None - log.debug("Failed loading table sample") - if Sample_table is not None: - try: - Sample_table.c.dataset_files.drop() - except Exception: - log.exception("Deleting column 'dataset_files' from the 'sample' table failed.") - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0053_item_ratings.py b/lib/galaxy/model/migrate/versions/0053_item_ratings.py deleted file mode 100644 index c3fa9b91b6a9..000000000000 --- a/lib/galaxy/model/migrate/versions/0053_item_ratings.py +++ /dev/null @@ -1,107 +0,0 @@ -""" -Migration script to create tables for rating histories, datasets, workflows, pages, and visualizations. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - add_index, - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -# Rating tables. -HistoryRatingAssociation_table = Table( - "history_rating_association", - metadata, - Column("id", Integer, primary_key=True), - Column("history_id", Integer, ForeignKey("history.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("rating", Integer, index=True), -) - -HistoryDatasetAssociationRatingAssociation_table = Table( - "history_dataset_association_rating_association", - metadata, - Column("id", Integer, primary_key=True), - Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("rating", Integer, index=True), -) - -StoredWorkflowRatingAssociation_table = Table( - "stored_workflow_rating_association", - metadata, - Column("id", Integer, primary_key=True), - Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("rating", Integer, index=True), -) - -PageRatingAssociation_table = Table( - "page_rating_association", - metadata, - Column("id", Integer, primary_key=True), - Column("page_id", Integer, ForeignKey("page.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("rating", Integer, index=True), -) - -VisualizationRatingAssociation_table = Table( - "visualization_rating_association", - metadata, - Column("id", Integer, primary_key=True), - Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("rating", Integer, index=True), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(HistoryRatingAssociation_table) - - # Create history_dataset_association_rating_association table. - try: - HistoryDatasetAssociationRatingAssociation_table.create() - except Exception as e: - # MySQL cannot handle long index names; when we see this error, create the index name manually. 
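# A note on the condition below: str.find() returns -1 (which is truthy)
# when the text is absent and 0 (falsy) when it is a prefix, so using its
# result as a boolean does not test containment. The reliable spelling uses
# the `in` operator (sketch; the helper name is hypothetical):

LONG_INDEX_ERROR = (
    "identifier name 'ix_history_dataset_association_rating_association_"
    "history_dataset_association_id' is too long"
)


def is_mysql_long_index_error(engine_name, error):
    return engine_name == "mysql" and LONG_INDEX_ERROR in str(error).lower()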
- if migrate_engine.name == "mysql" and str(e).lower().find( - "identifier name 'ix_history_dataset_association_rating_association_history_dataset_association_id' is too long" - ): - add_index( - "ix_hda_rating_association_hda_id", - HistoryDatasetAssociationRatingAssociation_table, - "history_dataset_association_id", - ) - else: - log.exception("Creating history_dataset_association_rating_association table failed.") - - create_table(StoredWorkflowRatingAssociation_table) - create_table(PageRatingAssociation_table) - create_table(VisualizationRatingAssociation_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(VisualizationRatingAssociation_table) - drop_table(PageRatingAssociation_table) - drop_table(StoredWorkflowRatingAssociation_table) - drop_table(HistoryDatasetAssociationRatingAssociation_table) - drop_table(HistoryRatingAssociation_table) diff --git a/lib/galaxy/model/migrate/versions/0054_visualization_dbkey.py b/lib/galaxy/model/migrate/versions/0054_visualization_dbkey.py deleted file mode 100644 index a5c42e4e8dff..000000000000 --- a/lib/galaxy/model/migrate/versions/0054_visualization_dbkey.py +++ /dev/null @@ -1,61 +0,0 @@ -""" -Migration script to add dbkey column for visualization. -""" - -import logging -from json import loads - -from sqlalchemy import ( - Column, - MetaData, - Table, - TEXT, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - add_index, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - Visualization_table = Table("visualization", metadata, autoload=True) - Visualization_revision_table = Table("visualization_revision", metadata, autoload=True) - - # Create dbkey columns. - x = Column("dbkey", TEXT) - add_column(x, Visualization_table, metadata) - y = Column("dbkey", TEXT) - add_column(y, Visualization_revision_table, metadata) - # Indexes need to be added separately because MySQL cannot index a TEXT/BLOB - # column without specifying mysql_length - add_index("ix_visualization_dbkey", Visualization_table, "dbkey") - add_index("ix_visualization_revision_dbkey", Visualization_revision_table, "dbkey") - - all_viz = migrate_engine.execute( - "SELECT visualization.id as viz_id, visualization_revision.id as viz_rev_id, visualization_revision.config FROM visualization_revision \ - LEFT JOIN visualization ON visualization.id=visualization_revision.visualization_id" - ) - for viz in all_viz: - viz_id = viz["viz_id"] - viz_rev_id = viz["viz_rev_id"] - if viz[Visualization_revision_table.c.config]: - dbkey = loads(viz[Visualization_revision_table.c.config]).get("dbkey", "").replace("'", "\\'") - migrate_engine.execute(f"UPDATE visualization_revision SET dbkey='{dbkey}' WHERE id={viz_rev_id}") - migrate_engine.execute(f"UPDATE visualization SET dbkey='{dbkey}' WHERE id={viz_id}") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("dbkey", "visualization", metadata) - drop_column("dbkey", "visualization_revision", metadata) diff --git a/lib/galaxy/model/migrate/versions/0055_add_pja_assoc_for_jobs.py b/lib/galaxy/model/migrate/versions/0055_add_pja_assoc_for_jobs.py deleted file mode 100644 index ae05437922d1..000000000000 --- a/lib/galaxy/model/migrate/versions/0055_add_pja_assoc_for_jobs.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -Migration script to add the post_job_action_association table. 
-""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -PostJobActionAssociation_table = Table( - "post_job_action_association", - metadata, - Column("id", Integer, primary_key=True), - Column("post_job_action_id", Integer, ForeignKey("post_job_action.id"), index=True, nullable=False), - Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=False), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(PostJobActionAssociation_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(PostJobActionAssociation_table) diff --git a/lib/galaxy/model/migrate/versions/0056_workflow_outputs.py b/lib/galaxy/model/migrate/versions/0056_workflow_outputs.py deleted file mode 100644 index 08de99ff7002..000000000000 --- a/lib/galaxy/model/migrate/versions/0056_workflow_outputs.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -Migration script to create tables for adding explicit workflow outputs. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - String, - Table, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -WorkflowOutput_table = Table( - "workflow_output", - metadata, - Column("id", Integer, primary_key=True), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False), - Column("output_name", String(255), nullable=True), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - create_table(WorkflowOutput_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - drop_table(WorkflowOutput_table) diff --git a/lib/galaxy/model/migrate/versions/0057_request_notify.py b/lib/galaxy/model/migrate/versions/0057_request_notify.py deleted file mode 100644 index 3a134efc7fd1..000000000000 --- a/lib/galaxy/model/migrate/versions/0057_request_notify.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -Migration script to modify the 'notify' field in the 'request' table from a boolean -to a JSONType -""" - -import logging -from json import dumps - -from sqlalchemy import ( - Boolean, - Column, - MetaData, - Table, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - Request_table = Table("request", metadata, autoload=True) - - # create the column again as JSONType - col = Column("notification", JSONType) - add_column(col, Request_table, metadata) - - cmd = "SELECT id, user_id, notify FROM request" - result = migrate_engine.execute(cmd) - for r in result: - id = int(r[0]) - notify_new = dict(email=[], sample_states=[], body="", subject="") - cmd = "UPDATE request SET notification='%s' WHERE id=%i" % (dumps(notify_new), id) - migrate_engine.execute(cmd) - - # SQLAlchemy Migrate has a bug when dropping a boolean column in SQLite - if migrate_engine.name != "sqlite": - drop_column("notify", Request_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - 
metadata.reflect() - - Request_table = Table("request", metadata, autoload=True) - if migrate_engine.name != "sqlite": - c = Column("notify", Boolean, default=False) - add_column(c, Request_table, metadata) - - drop_column("notification", Request_table) diff --git a/lib/galaxy/model/migrate/versions/0058_history_import_export.py b/lib/galaxy/model/migrate/versions/0058_history_import_export.py deleted file mode 100644 index 669111d25899..000000000000 --- a/lib/galaxy/model/migrate/versions/0058_history_import_export.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Migration script to create table for exporting histories to archives. -""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -# Table to add. - -JobExportHistoryArchive_table = Table( - "job_export_history_archive", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("history_id", Integer, ForeignKey("history.id"), index=True), - Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True), - Column("compressed", Boolean, index=True, default=False), - Column("history_attrs_filename", TEXT), - Column("datasets_attrs_filename", TEXT), - Column("jobs_attrs_filename", TEXT), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Create job_export_history_archive table. - try: - JobExportHistoryArchive_table.create() - except Exception: - log.exception("Creating job_export_history_archive table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop job_export_history_archive table. 
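
For contrast with the Alembic-based framework this patch moves to, the same schema change is declared once and the try/except-log scaffolding goes away, since a failed revision aborts instead of logging and continuing. A rough sketch only, with placeholder revision identifiers:

    import sqlalchemy as sa
    from alembic import op

    # Placeholder revision identifiers, for illustration only.
    revision = "xxxxxxxxxxxx"
    down_revision = None

    def upgrade():
        op.create_table(
            "job_export_history_archive",
            sa.Column("id", sa.Integer, primary_key=True),
            sa.Column("job_id", sa.Integer, sa.ForeignKey("job.id"), index=True),
            sa.Column("history_id", sa.Integer, sa.ForeignKey("history.id"), index=True),
            sa.Column("dataset_id", sa.Integer, sa.ForeignKey("dataset.id"), index=True),
            sa.Column("compressed", sa.Boolean, index=True, default=False),
            sa.Column("history_attrs_filename", sa.Text),
            sa.Column("datasets_attrs_filename", sa.Text),
            sa.Column("jobs_attrs_filename", sa.Text),
        )

    def downgrade():
        op.drop_table("job_export_history_archive")
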
- try: - JobExportHistoryArchive_table.drop() - except Exception: - log.exception("Dropping job_export_history_archive table failed.") diff --git a/lib/galaxy/model/migrate/versions/0059_sample_dataset_file_path.py b/lib/galaxy/model/migrate/versions/0059_sample_dataset_file_path.py deleted file mode 100644 index f112668990d7..000000000000 --- a/lib/galaxy/model/migrate/versions/0059_sample_dataset_file_path.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Migration script to modify the 'file_path' field type in 'sample_dataset' table -to 'TEXT' so that it can support large file paths exceeding 255 characters -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, - TEXT, -) -from sqlalchemy.exc import NoSuchTableError - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - try: - SampleDataset_table = Table("sample_dataset", metadata, autoload=True) - except NoSuchTableError: - SampleDataset_table = None - log.debug("Failed loading table 'sample_dataset'") - - if SampleDataset_table is not None: - cmd = "SELECT id, file_path FROM sample_dataset" - result = migrate_engine.execute(cmd) - filepath_dict = {} - for r in result: - id = int(r[0]) - filepath_dict[id] = r[1] - # remove the 'file_path' column - try: - SampleDataset_table.c.file_path.drop() - except Exception: - log.exception("Deleting column 'file_path' from the 'sample_dataset' table failed.") - # create the column again - try: - col = Column("file_path", TEXT) - col.create(SampleDataset_table) - assert col is SampleDataset_table.c.file_path - except Exception: - log.exception("Creating column 'file_path' in the 'sample_dataset' table failed.") - - for id, file_path in filepath_dict.items(): - cmd = "update sample_dataset set file_path='%s' where id=%i" % (file_path, id) - migrate_engine.execute(cmd) - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0060_history_archive_import.py b/lib/galaxy/model/migrate/versions/0060_history_archive_import.py deleted file mode 100644 index 11bb16030e8c..000000000000 --- a/lib/galaxy/model/migrate/versions/0060_history_archive_import.py +++ /dev/null @@ -1,79 +0,0 @@ -""" -Migration script to create column and table for importing histories from -file archives. -""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.migrate.versions.util import engine_false - -log = logging.getLogger(__name__) -metadata = MetaData() - -# Columns to add. - -importing_col = Column("importing", Boolean, index=True, default=False) -ldda_parent_col = Column("ldda_parent_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True) - -# Table to add. - -JobImportHistoryArchive_table = Table( - "job_import_history_archive", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("history_id", Integer, ForeignKey("history.id"), index=True), - Column("archive_dir", TEXT), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Add column to history table and initialize. - try: - History_table = Table("history", metadata, autoload=True) - importing_col.create(History_table, index_name="ix_history_importing") - assert importing_col is History_table.c.importing - - # Initialize column to false. 
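
The backfill UPDATE just below, and the engine_false helper it leans on, exist because a Python-side default only applies to new inserts and each backend spells boolean literals differently. A server-side default lets the database fill existing rows itself; a hedged sketch of the Alembic spelling, where sa.false() renders the right literal per dialect:

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        # server_default is applied by the database, so existing rows are
        # backfilled without a separate dialect-aware UPDATE statement.
        op.add_column("history", sa.Column("importing", sa.Boolean, server_default=sa.false()))
        op.create_index("ix_history_importing", "history", ["importing"])
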
- migrate_engine.execute(f"UPDATE history SET importing={engine_false(migrate_engine)}") - except Exception: - log.exception("Adding column 'importing' to history table failed.") - - # Create job_import_history_archive table. - try: - JobImportHistoryArchive_table.create() - except Exception: - log.exception("Creating job_import_history_archive table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop 'importing' column from history table. - try: - History_table = Table("history", metadata, autoload=True) - importing_col = History_table.c.importing - importing_col.drop() - except Exception: - log.exception("Dropping column 'importing' from history table failed.") - - # Drop job_import_history_archive table. - try: - JobImportHistoryArchive_table.drop() - except Exception: - log.exception("Dropping job_import_history_archive table failed.") diff --git a/lib/galaxy/model/migrate/versions/0061_tasks.py b/lib/galaxy/model/migrate/versions/0061_tasks.py deleted file mode 100644 index c4523f223fe5..000000000000 --- a/lib/galaxy/model/migrate/versions/0061_tasks.py +++ /dev/null @@ -1,61 +0,0 @@ -""" -Migration script to create tables task management. -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - String, - Table, - TEXT, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -Task_table = Table( - "task", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("execution_time", DateTime), - Column("update_time", DateTime, default=now, onupdate=now), - Column("state", String(64), index=True), - Column("command_line", TEXT), - Column("param_filename", String(1024)), - Column("runner_name", String(255)), - Column("stdout", TEXT), - Column("stderr", TEXT), - Column("traceback", TEXT), - Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=False), - Column("part_file", String(1024)), - Column("task_runner_name", String(255)), - Column("task_runner_external_id", String(255)), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(Task_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(Task_table) diff --git a/lib/galaxy/model/migrate/versions/0062_user_openid_table.py b/lib/galaxy/model/migrate/versions/0062_user_openid_table.py deleted file mode 100644 index 130ffd57661d..000000000000 --- a/lib/galaxy/model/migrate/versions/0062_user_openid_table.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -Migration script to create table for associating sessions and users with -OpenIDs. 
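
The script below falls back to raw ALTER TABLE on MySQL because an unbounded TEXT column cannot be indexed there without a prefix length. SQLAlchemy can express the same index portably with the mysql_length keyword, which other backends ignore; a sketch in which user_openid_table and engine are assumptions, not names from this script:

    import sqlalchemy as sa

    # Portable spelling of MySQL's "openid(255)" prefix index.
    # user_openid_table and engine are assumed to be defined elsewhere.
    ix = sa.Index(
        "ix_galaxy_user_openid_openid",
        user_openid_table.c.openid,
        unique=True,
        mysql_length=255,
    )
    ix.create(bind=engine)
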
-""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Index, - Integer, - MetaData, - Table, - TEXT, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -# Table to add - -UserOpenID_table = Table( - "galaxy_user_openid", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, index=True, default=now, onupdate=now), - Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("openid", TEXT), -) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - # Create galaxy_user_openid table - try: - UserOpenID_table.create() - except Exception: - log.exception("Creating galaxy_user_openid table failed.") - - ix_name = "ix_galaxy_user_openid_openid" - if migrate_engine.name == "mysql": - i = "ALTER TABLE galaxy_user_openid ADD UNIQUE INDEX ( openid( 255 ) )" - migrate_engine.execute(i) - else: - i = Index(ix_name, UserOpenID_table.c.openid, unique=True) - try: - i.create() - except Exception: - log.exception("Adding index '%s' failed.", ix_name) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop galaxy_user_openid table - try: - UserOpenID_table.drop() - except Exception: - log.exception("Dropping galaxy_user_openid table failed.") diff --git a/lib/galaxy/model/migrate/versions/0063_sequencer_table.py b/lib/galaxy/model/migrate/versions/0063_sequencer_table.py deleted file mode 100644 index 0cc2489a2e23..000000000000 --- a/lib/galaxy/model/migrate/versions/0063_sequencer_table.py +++ /dev/null @@ -1,61 +0,0 @@ -""" -Migration script to create a new 'sequencer' table -""" - -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.custom_types import TrimmedString - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -# Table to add -Sequencer_table = Table( - "sequencer", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("name", TrimmedString(255), nullable=False), - Column("description", TEXT), - Column("sequencer_type_id", TrimmedString(255), nullable=False), - Column("version", TrimmedString(255)), - Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True), - Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True), - Column("deleted", Boolean, index=True, default=False), -) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - # create the sequencer table - try: - Sequencer_table.create() - except Exception: - log.exception("Creating 'sequencer' table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - # delete sequencer table - try: - Sequencer_table = Table("sequencer", metadata, autoload=True) - Sequencer_table.drop() - except Exception: - log.exception("Deleting 'sequencer' table failed.") diff --git a/lib/galaxy/model/migrate/versions/0064_add_run_and_sample_run_association_tables.py b/lib/galaxy/model/migrate/versions/0064_add_run_and_sample_run_association_tables.py deleted file mode 100644 index 
e94c790ef640..000000000000 --- a/lib/galaxy/model/migrate/versions/0064_add_run_and_sample_run_association_tables.py +++ /dev/null @@ -1,71 +0,0 @@ -""" -Migration script to add the run and sample_run_association tables. -""" - -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -Run_table = Table( - "run", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True), - Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True), - Column("deleted", Boolean, index=True, default=False), -) - -RequestTypeRunAssociation_table = Table( - "request_type_run_association", - metadata, - Column("id", Integer, primary_key=True), - Column("request_type_id", Integer, ForeignKey("request_type.id"), index=True, nullable=False), - Column("run_id", Integer, ForeignKey("run.id"), index=True, nullable=False), -) - -SampleRunAssociation_table = Table( - "sample_run_association", - metadata, - Column("id", Integer, primary_key=True), - Column("sample_id", Integer, ForeignKey("sample.id"), index=True, nullable=False), - Column("run_id", Integer, ForeignKey("run.id"), index=True, nullable=False), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(Run_table) - create_table(RequestTypeRunAssociation_table) - create_table(SampleRunAssociation_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(SampleRunAssociation_table) - drop_table(RequestTypeRunAssociation_table) - drop_table(Run_table) diff --git a/lib/galaxy/model/migrate/versions/0065_add_name_to_form_fields_and_values.py b/lib/galaxy/model/migrate/versions/0065_add_name_to_form_fields_and_values.py deleted file mode 100644 index 7ff1d99aacc2..000000000000 --- a/lib/galaxy/model/migrate/versions/0065_add_name_to_form_fields_and_values.py +++ /dev/null @@ -1,136 +0,0 @@ -""" -Migration script to add 'name' attribute to the JSON dict which describes -a form definition field and the form values in the database. In the 'form_values' -table, the 'content' column is now a JSON dict instead of a list. 
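
A side note on the pattern here, which is also what script 0076 later has to repair: these UPDATEs are assembled with % interpolation and hand-escaped quotes. Bound parameters make the escaping unnecessary because the driver quotes values itself; a minimal sketch using SQLAlchemy's text() construct:

    import sqlalchemy as sa

    def update_form_values(connection, form_values_id: int, content_json: str) -> None:
        # The driver binds :content and :id safely, so embedded quotes and
        # newlines in content_json need no manual escaping.
        connection.execute(
            sa.text("UPDATE form_values SET content = :content WHERE id = :id"),
            {"content": content_json, "id": form_values_id},
        )
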
-""" - -import logging -from json import ( - dumps, - loads, -) - -from sqlalchemy import ( - MetaData, - Table, -) - -from galaxy.model.custom_types import _sniffnfix_pg9_hex - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - Table("form_definition", metadata, autoload=True) - Table("form_values", metadata, autoload=True) - - def get_value(lst, index): - try: - return str(lst[index]).replace("'", "''") - except IndexError: - return "" - - # Go through the entire table and add a 'name' attribute for each field - # in the list of fields for each form definition - cmd = "SELECT f.id, f.fields FROM form_definition AS f" - result = migrate_engine.execute(cmd) - for row in result: - form_definition_id = row[0] - fields = str(row[1]) - if not fields.strip(): - continue - fields_list = loads(_sniffnfix_pg9_hex(fields)) - if len(fields_list): - for index, field in enumerate(fields_list): - field["name"] = "field_%i" % index - field["helptext"] = field["helptext"].replace("'", "''").replace('"', "") - field["label"] = field["label"].replace("'", "''") - fields_json = dumps(fields_list) - if migrate_engine.name == "mysql": - cmd = "UPDATE form_definition AS f SET f.fields='%s' WHERE f.id=%i" % (fields_json, form_definition_id) - else: - cmd = "UPDATE form_definition SET fields='%s' WHERE id=%i" % (fields_json, form_definition_id) - migrate_engine.execute(cmd) - # replace the values list in the content field of the form_values table with a name:value dict - cmd = ( - "SELECT form_values.id, form_values.content, form_definition.fields" - " FROM form_values, form_definition" - " WHERE form_values.form_definition_id=form_definition.id" - " ORDER BY form_values.id ASC" - ) - result = migrate_engine.execute(cmd) - for row in result: - form_values_id = int(row[0]) - if not str(row[1]).strip(): - continue - row1 = str(row[1]).replace("\n", "").replace("\r", "") - values_list = loads(str(row1).strip()) - if not str(row[2]).strip(): - continue - fields_list = loads(str(row[2]).strip()) - if fields_list and isinstance(values_list, list): - values_dict = {} - for field_index, field in enumerate(fields_list): - field_name = field["name"] - values_dict[field_name] = get_value(values_list, field_index) - cmd = "UPDATE form_values SET content='%s' WHERE id=%i" % (dumps(values_dict), form_values_id) - migrate_engine.execute(cmd) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - Table("form_definition", metadata, autoload=True) - Table("form_values", metadata, autoload=True) - # remove the name attribute in the content column JSON dict in the form_values table - # and restore it to a list of values - cmd = ( - "SELECT form_values.id, form_values.content, form_definition.fields" - " FROM form_values, form_definition" - " WHERE form_values.form_definition_id=form_definition.id" - " ORDER BY form_values.id ASC" - ) - result = migrate_engine.execute(cmd) - for row in result: - form_values_id = int(row[0]) - if not str(row[1]).strip(): - continue - values_dict = loads(str(row[1])) - if not str(row[2]).strip(): - continue - fields_list = loads(str(row[2])) - if fields_list: - values_list = [] - for field in fields_list: - field_name = field["name"] - field_value = values_dict[field_name] - values_list.append(field_value) - cmd = "UPDATE form_values SET content='%s' WHERE id=%i" % (dumps(values_list), form_values_id) - migrate_engine.execute(cmd) - # remove name 
attribute from the field column of the form_definition table - cmd = "SELECT f.id, f.fields FROM form_definition AS f" - result = migrate_engine.execute(cmd) - for row in result: - form_definition_id = row[0] - fields = str(row[1]) - if not fields.strip(): - continue - fields_list = loads(_sniffnfix_pg9_hex(fields)) - if len(fields_list): - for field in fields_list: - if "name" in field: - del field["name"] - if migrate_engine.name == "mysql": - cmd = "UPDATE form_definition AS f SET f.fields='%s' WHERE f.id=%i" % ( - dumps(fields_list), - form_definition_id, - ) - else: - cmd = "UPDATE form_definition SET fields='%s' WHERE id=%i" % (dumps(fields_list), form_definition_id) - migrate_engine.execute(cmd) diff --git a/lib/galaxy/model/migrate/versions/0066_deferred_job_and_transfer_job_tables.py b/lib/galaxy/model/migrate/versions/0066_deferred_job_and_transfer_job_tables.py deleted file mode 100644 index 4e1ec88f4540..000000000000 --- a/lib/galaxy/model/migrate/versions/0066_deferred_job_and_transfer_job_tables.py +++ /dev/null @@ -1,81 +0,0 @@ -""" -Migration script to create table for storing deferred job and managed transfer -information. -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - Integer, - MetaData, - String, - Table, -) - -from galaxy.model.custom_types import JSONType - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -# Table to add - -DeferredJob_table = Table( - "deferred_job", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("state", String(64), index=True), - Column("plugin", String(128), index=True), - Column("params", JSONType), -) - -TransferJob_table = Table( - "transfer_job", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("state", String(64), index=True), - Column("path", String(1024)), - Column("params", JSONType), -) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - # Create deferred_job table - try: - DeferredJob_table.create() - except Exception: - log.exception("Creating deferred_job table failed.") - - # Create transfer_job table - try: - TransferJob_table.create() - except Exception: - log.exception("Creating transfer_job table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop deferred_job table - try: - DeferredJob_table.drop() - except Exception: - log.exception("Dropping deferred_job table failed.") - - # Drop transfer_job table - try: - TransferJob_table.drop() - except Exception: - log.exception("Dropping transfer_job table failed.") diff --git a/lib/galaxy/model/migrate/versions/0067_populate_sequencer_table.py b/lib/galaxy/model/migrate/versions/0067_populate_sequencer_table.py deleted file mode 100644 index 4ed8cfa7c30a..000000000000 --- a/lib/galaxy/model/migrate/versions/0067_populate_sequencer_table.py +++ /dev/null @@ -1,284 +0,0 @@ -""" -Migration script to populate the 'sequencer' table and it is populated using unique -entries in the 'datatx_info' column in the 'request_type' table. It also deletes the 'datatx_info' -column in the 'request_type' table and adds a foreign key to the 'sequencer' table. The -actual contents of the datatx_info column are stored as form_values. 
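
Data migrations like this one carry over to Alembic largely unchanged: the framework exposes the connection the revision runs on, and the same SELECT-transform-UPDATE loop runs against it. A skeletal sketch of the pattern, not a transcription of this revision:

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        bind = op.get_bind()  # the live Connection the revision executes on
        rows = bind.execute(sa.text("SELECT id, datatx_info FROM request_type ORDER BY id"))
        for request_type_id, datatx_info in rows:
            if not (datatx_info or "").strip():
                continue
            # ...decode and transform the JSON payload here, then write it
            # back with bound parameters rather than interpolated strings.
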
-""" - -import logging -from json import ( - dumps, - loads, -) - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, - engine_false, - localtimestamp, - nextval, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def get_latest_id(migrate_engine, table): - result = migrate_engine.execute(f"select id from {table} order by id desc") - row = result.fetchone() - if row: - return row[0] - else: - raise Exception(f"Unable to get the latest id in the {table} table.") - - -def create_sequencer_form_definition(migrate_engine): - """ - Create a new form_definition containing 5 fields (host, username, password, - data_dir & rename_datasets) which described the existing datatx_info json - dict in the request_type table - """ - # create new form_definition_current in the db - cmd = "INSERT INTO form_definition_current VALUES ( {}, {}, {}, {}, {} )".format( - nextval(migrate_engine, "form_definition_current"), - localtimestamp(migrate_engine), - localtimestamp(migrate_engine), - "NULL", - engine_false(migrate_engine), - ) - migrate_engine.execute(cmd) - # get this form_definition_current id - form_definition_current_id = get_latest_id(migrate_engine, "form_definition_current") - # create new form_definition in the db - form_definition_name = "Generic sequencer form" - form_definition_desc = "" - form_definition_fields = [] - fields = [ - ("Host", "TextField"), - ("User name", "TextField"), - ("Password", "PasswordField"), - ("Data directory", "TextField"), - ] - for index, (label, field_type) in enumerate(fields): - form_definition_fields.append( - { - "name": "field_%i" % index, - "label": label, - "helptext": "", - "visible": True, - "required": False, - "type": field_type, - "selectlist": [], - "layout": "none", - "default": "", - } - ) - form_definition_fields.append( - { - "name": "field_%i" % len(fields), - "label": "Prepend the experiment name and sample name to the dataset name?", - "helptext": "Galaxy datasets are renamed by prepending the experiment name and sample name to the dataset name, ensuring dataset names remain unique in Galaxy even when multiple datasets have the same name on the sequencer.", - "visible": True, - "required": False, - "type": "SelectField", - "selectlist": [ - "Do not rename", - "Preprend sample name", - "Prepend experiment name", - "Prepend experiment and sample name", - ], - "layout": "none", - "default": "", - } - ) - form_definition_type = "Sequencer Information Form" - form_definition_layout = dumps("[]") - cmd = "INSERT INTO form_definition VALUES ( %s, %s, %s, '%s', '%s', %s, '%s', '%s', '%s' )" - cmd = cmd % ( - nextval(migrate_engine, "form_definition"), - localtimestamp(migrate_engine), - localtimestamp(migrate_engine), - form_definition_name, - form_definition_desc, - form_definition_current_id, - dumps(form_definition_fields), - form_definition_type, - form_definition_layout, - ) - migrate_engine.execute(cmd) - # get this form_definition id - form_definition_id = get_latest_id(migrate_engine, "form_definition") - # update the form_definition_id column in form_definition_current - cmd = "UPDATE form_definition_current SET latest_form_id=%i WHERE id=%i" % ( - form_definition_id, - form_definition_current_id, - ) - migrate_engine.execute(cmd) - return form_definition_id - - -def get_sequencer_id(migrate_engine, sequencer_info): - """Get the sequencer id corresponding to the 
sequencer information""" - # Check if there is any existing sequencer which have the same sequencer - # information fields & values - cmd = "SELECT sequencer.id, form_values.content FROM sequencer, form_values WHERE sequencer.form_values_id=form_values.id" - result = migrate_engine.execute(cmd) - for row in result: - sequencer_id = row[0] - values = str(row[1]) - if not values.strip(): - continue - values = loads(values) - # proceed only if sequencer_info is a valid list - if values and isinstance(values, dict): - if ( - sequencer_info.get("host", "") == values.get("field_0", "") - and sequencer_info.get("username", "") == values.get("field_1", "") - and sequencer_info.get("password", "") == values.get("field_2", "") - and sequencer_info.get("data_dir", "") == values.get("field_3", "") - and sequencer_info.get("rename_dataset", "") == values.get("field_4", "") - ): - return sequencer_id - return None - - -def add_sequencer(migrate_engine, sequencer_index, sequencer_form_definition_id, sequencer_info): - """Adds a new sequencer to the sequencer table along with its form values.""" - # Create a new form values record with the supplied sequencer information - values = dumps( - { - "field_0": sequencer_info.get("host", ""), - "field_1": sequencer_info.get("username", ""), - "field_2": sequencer_info.get("password", ""), - "field_3": sequencer_info.get("data_dir", ""), - "field_4": sequencer_info.get("rename_dataset", ""), - } - ) - cmd = "INSERT INTO form_values VALUES ( {}, {}, {}, {}, '{}' )".format( - nextval(migrate_engine, "form_values"), - localtimestamp(migrate_engine), - localtimestamp(migrate_engine), - sequencer_form_definition_id, - values, - ) - migrate_engine.execute(cmd) - sequencer_form_values_id = get_latest_id(migrate_engine, "form_values") - # Create a new sequencer record with reference to the form value created above. 
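
The get_latest_id helper above rediscovers the key it just inserted by re-querying ORDER BY id DESC, which is racy if anything else writes to the table. SQLAlchemy Core can return the generated key from the INSERT itself; a sketch assuming a reflected Table object:

    import sqlalchemy as sa

    def insert_returning_id(connection, table: sa.Table, values: dict) -> int:
        # The dialect retrieves the generated key (RETURNING, lastrowid, ...)
        # as part of the INSERT, so no follow-up SELECT is needed.
        result = connection.execute(table.insert().values(**values))
        return result.inserted_primary_key[0]
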
- name = "Sequencer_%i" % sequencer_index - desc = "" - version = "" - sequencer_type_id = "simple_unknown_sequencer" - cmd = "INSERT INTO sequencer VALUES ( {}, {}, {}, '{}', '{}', '{}', '{}', {}, {}, {} )".format( - nextval(migrate_engine, "sequencer"), - localtimestamp(migrate_engine), - localtimestamp(migrate_engine), - name, - desc, - sequencer_type_id, - version, - sequencer_form_definition_id, - sequencer_form_values_id, - engine_false(migrate_engine), - ) - migrate_engine.execute(cmd) - return get_latest_id(migrate_engine, "sequencer") - - -def update_sequencer_id_in_request_type(migrate_engine, request_type_id, sequencer_id): - """Update the foreign key to the sequencer table in the request_type table""" - cmd = "UPDATE request_type SET sequencer_id=%i WHERE id=%i" % (sequencer_id, request_type_id) - migrate_engine.execute(cmd) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - RequestType_table = Table("request_type", metadata, autoload=True) - # create foreign key field to the sequencer table in the request_type table - col = Column("sequencer_id", Integer, ForeignKey("sequencer.id"), nullable=True) - add_column(col, RequestType_table, metadata) - # copy the sequencer information contained in the 'datatx_info' column - # of the request_type table to the form values referenced in the sequencer table - cmd = "SELECT id, name, datatx_info FROM request_type ORDER BY id ASC" - result = migrate_engine.execute(cmd) - results_list = result.fetchall() - # Proceed only if request_types exists - if len(results_list): - # In this migration script the all the contents of the datatx_info are stored as form_values - # with a pointer to the sequencer table. This way the sequencer information can be customized - # by the admin and is no longer restricted to host, username, password, data directory. - # For the existing request_types in the database, we add a new form_definition - # with these 4 fields. Then we populate the sequencer table with unique datatx_info - # column from the existing request_types. 
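
The loop that follows amounts to a get-or-create over the two helpers defined above; stated as one function, under the same names, purely for readability:

    def get_or_create_sequencer(migrate_engine, index, form_definition_id, info):
        # Reuse an existing sequencer whose form values match info;
        # otherwise insert a new row and return its id.
        sequencer_id = get_sequencer_id(migrate_engine, info)
        if not sequencer_id:
            sequencer_id = add_sequencer(migrate_engine, index, form_definition_id, info)
        return sequencer_id
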
- sequencer_form_definition_id = create_sequencer_form_definition(migrate_engine) - sequencer_index = 1 - for row in results_list: - request_type_id = row[0] - sequencer_info = str(row[2]) # datatx_info column - # skip if sequencer_info is empty - if not sequencer_info.strip() or sequencer_info in ["None", "null"]: - continue - sequencer_info = loads(sequencer_info.strip()) - # proceed only if sequencer_info is a valid dict - if sequencer_info and isinstance(sequencer_info, dict): - # check if this sequencer has already been added to the sequencer table - sequencer_id = get_sequencer_id(migrate_engine, sequencer_info) - if not sequencer_id: - # add to the sequencer table - sequencer_id = add_sequencer( - migrate_engine, sequencer_index, sequencer_form_definition_id, sequencer_info - ) - # now update the sequencer_id column in request_type table - update_sequencer_id_in_request_type(migrate_engine, request_type_id, sequencer_id) - sequencer_index = sequencer_index + 1 - - # Finally delete the 'datatx_info' column from the request_type table - drop_column("datatx_info", RequestType_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - RequestType_table = Table("request_type", metadata, autoload=True) - # create the 'datatx_info' column - col = Column("datatx_info", JSONType) - add_column(col, RequestType_table, metadata) - # restore the datatx_info column data in the request_type table with data from - # the sequencer and the form_values table - cmd = ( - "SELECT request_type.id, form_values.content " - + " FROM request_type, sequencer, form_values " - + " WHERE request_type.sequencer_id=sequencer.id AND sequencer.form_values_id=form_values.id " - + " ORDER BY request_type.id ASC" - ) - result = migrate_engine.execute(cmd) - for row in result: - request_type_id = row[0] - seq_values = loads(str(row[1])) - # create the datatx_info json dict - datatx_info = dumps( - dict( - host=seq_values.get("field_0", ""), - username=seq_values.get("field_1", ""), - password=seq_values.get("field_2", ""), - data_dir=seq_values.get("field_3", ""), - rename_dataset=seq_values.get("field_4", ""), - ) - ) - # update the column - cmd = "UPDATE request_type SET datatx_info='%s' WHERE id=%i" % (datatx_info, request_type_id) - migrate_engine.execute(cmd) - # delete foreign key field to the sequencer table in the request_type table - drop_column("sequencer_id", RequestType_table) diff --git a/lib/galaxy/model/migrate/versions/0068_rename_sequencer_to_external_services.py b/lib/galaxy/model/migrate/versions/0068_rename_sequencer_to_external_services.py deleted file mode 100644 index cef6baa7ca34..000000000000 --- a/lib/galaxy/model/migrate/versions/0068_rename_sequencer_to_external_services.py +++ /dev/null @@ -1,168 +0,0 @@ -""" -This migration script renames the sequencer table to 'external_service' table and -creates a association table, 'request_type_external_service_association' and -populates it. The 'sequencer_id' foreign_key from the 'request_type' table is removed. -The 'sequencer_type_id' column is renamed to 'external_service_type_id' in the renamed -table 'external_service'. Finally, adds a foreign key to the external_service table in the -sample_dataset table and populates it. 
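
Renames are one of the spots where these scripts special-case PostgreSQL by hand, as this one does for the implicit primary-key sequence. For comparison, the Alembic spelling is compact; an illustrative sketch, not a revision from the new migration line:

    from alembic import op

    def upgrade():
        op.rename_table("sequencer", "external_service")
        if op.get_bind().dialect.name == "postgresql":
            # The implicit SERIAL sequence keeps its old name; rename it to match.
            op.execute("ALTER SEQUENCE sequencer_id_seq RENAME TO external_service_id_seq")
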
-""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, - drop_table, - nextval, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # rename 'sequencer' table to 'external_service' - Sequencer_table = Table("sequencer", metadata, autoload=True) - Sequencer_table.rename("external_service") - - # if running PostgreSQL, rename the primary key sequence too - if migrate_engine.name in ["postgres", "postgresql"]: - cmd = "ALTER SEQUENCE sequencer_id_seq RENAME TO external_service_id_seq" - migrate_engine.execute(cmd) - - # Add 'external_services_id' column to 'sample_dataset' table - SampleDataset_table = Table("sample_dataset", metadata, autoload=True) - col = Column( - "external_service_id", - Integer, - ForeignKey("external_service.id", name="sample_dataset_external_services_id_fk"), - index=True, - ) - add_column(col, SampleDataset_table, metadata, index_name="ix_sample_dataset_external_service_id") - - # populate the column - cmd = ( - "SELECT sample_dataset.id, request_type.sequencer_id " - + " FROM sample_dataset, sample, request, request_type " - + " WHERE sample.id=sample_dataset.sample_id and request.id=sample.request_id and request.request_type_id=request_type.id " - + " ORDER BY sample_dataset.id" - ) - try: - result = migrate_engine.execute(cmd) - for r in result: - sample_dataset_id = int(r[0]) - sequencer_id = int(r[1]) - cmd = "UPDATE sample_dataset SET external_service_id='%i' where id=%i" % (sequencer_id, sample_dataset_id) - migrate_engine.execute(cmd) - except Exception: - log.exception("Exception executing SQL command: %s", cmd) - - # rename 'sequencer_type_id' column to 'external_service_type_id' in the table 'external_service' - # create the column as 'external_service_type_id' - ExternalServices_table = Table("external_service", metadata, autoload=True) - col = Column("external_service_type_id", TrimmedString(255)) - add_column(col, ExternalServices_table, metadata) - - # populate this new column - cmd = "UPDATE external_service SET external_service_type_id=sequencer_type_id" - migrate_engine.execute(cmd) - - # remove the 'sequencer_type_id' column - drop_column("sequencer_type_id", ExternalServices_table) - - # create 'request_type_external_service_association' table - RequestTypeExternalServiceAssociation_table = Table( - "request_type_external_service_association", - metadata, - Column("id", Integer, primary_key=True), - Column("request_type_id", Integer, ForeignKey("request_type.id"), index=True), - Column("external_service_id", Integer, ForeignKey("external_service.id"), index=True), - ) - create_table(RequestTypeExternalServiceAssociation_table) - - # populate 'request_type_external_service_association' table - cmd = "SELECT id, sequencer_id FROM request_type ORDER BY id ASC" - result = migrate_engine.execute(cmd) - results_list = result.fetchall() - # Proceed only if request_types exists - for row in results_list: - request_type_id = row[0] - sequencer_id = row[1] - if not sequencer_id: - sequencer_id = "null" - cmd = "INSERT INTO request_type_external_service_association VALUES ( {}, {}, {} )".format( - nextval(migrate_engine, "request_type_external_service_association"), request_type_id, 
sequencer_id - ) - migrate_engine.execute(cmd) - - # TODO: Dropping a column used in a foreign key fails in MySQL, need to remove the FK first. - drop_column("sequencer_id", "request_type", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # SQLite does not always update foreign key constraints when the target - # table is renamed, so we start with the table rename. - # rename the 'external_service' table to 'sequencer' - ExternalServices_table = Table("external_service", metadata, autoload=True) - ExternalServices_table.rename("sequencer") - - # if running PostgreSQL, rename the primary key sequence too - if migrate_engine.name in ["postgres", "postgresql"]: - cmd = "ALTER SEQUENCE external_service_id_seq RENAME TO sequencer_id_seq" - migrate_engine.execute(cmd) - - # create the 'sequencer_id' column in the 'request_type' table - col = Column("sequencer_id", Integer, ForeignKey("sequencer.id"), nullable=True) - add_column(col, "request_type", metadata) - - # populate 'sequencer_id' column in the 'request_type' table from the - # 'request_type_external_service_association' table - cmd = "SELECT request_type_id, external_service_id FROM request_type_external_service_association ORDER BY id ASC" - result = migrate_engine.execute(cmd) - results_list = result.fetchall() - for row in results_list: - request_type_id = row[0] - external_service_id = row[1] - cmd = "UPDATE request_type SET sequencer_id=%i WHERE id=%i" % (external_service_id, request_type_id) - migrate_engine.execute(cmd) - - # remove the 'request_type_external_service_association' table - RequestTypeExternalServiceAssociation_table = Table( - "request_type_external_service_association", metadata, autoload=True - ) - drop_table(RequestTypeExternalServiceAssociation_table) - - # rename 'external_service_type_id' column to 'sequencer_type_id' in the table 'sequencer' - # create the column 'sequencer_type_id' - Sequencer_table = Table("sequencer", metadata, autoload=True) - col = Column("sequencer_type_id", TrimmedString(255)) # should also have nullable=False - add_column(col, Sequencer_table, metadata) - - # populate this new column - cmd = "UPDATE sequencer SET sequencer_type_id=external_service_type_id" - migrate_engine.execute(cmd) - - # remove the 'external_service_type_id' column - drop_column("external_service_type_id", Sequencer_table) - - # drop the 'external_service_id' column in the 'sample_dataset' table - drop_column("external_service_id", "sample_dataset", metadata) diff --git a/lib/galaxy/model/migrate/versions/0069_rename_sequencer_form_type.py b/lib/galaxy/model/migrate/versions/0069_rename_sequencer_form_type.py deleted file mode 100644 index a005987cabc1..000000000000 --- a/lib/galaxy/model/migrate/versions/0069_rename_sequencer_form_type.py +++ /dev/null @@ -1,29 +0,0 @@ -""" -Migration script to rename the sequencer information form type to external service information form -""" - -import logging - -from sqlalchemy import MetaData - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - current_form_type = "Sequencer Information Form" - new_form_type = "External Service Information Form" - cmd = f"update form_definition set type='{new_form_type}' where type='{current_form_type}'" - migrate_engine.execute(cmd) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - new_form_type = "Sequencer Information Form" - 
current_form_type = "External Service Information Form" - cmd = f"update form_definition set type='{new_form_type}' where type='{current_form_type}'" - migrate_engine.execute(cmd) diff --git a/lib/galaxy/model/migrate/versions/0070_add_info_column_to_deferred_job_table.py b/lib/galaxy/model/migrate/versions/0070_add_info_column_to_deferred_job_table.py deleted file mode 100644 index 64826809bb52..000000000000 --- a/lib/galaxy/model/migrate/versions/0070_add_info_column_to_deferred_job_table.py +++ /dev/null @@ -1,38 +0,0 @@ -""" -Migration script to add 'info' column to the transfer_job table. -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, - TEXT, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - try: - TransferJob_table = Table("transfer_job", metadata, autoload=True) - c = Column("info", TEXT) - c.create(TransferJob_table) - assert c is TransferJob_table.c.info - except Exception: - log.exception("Adding info column to transfer_job table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - try: - TransferJob_table = Table("transfer_job", metadata, autoload=True) - TransferJob_table.c.info.drop() - except Exception: - log.exception("Dropping info column from transfer_job table failed.") diff --git a/lib/galaxy/model/migrate/versions/0071_add_history_and_workflow_to_sample.py b/lib/galaxy/model/migrate/versions/0071_add_history_and_workflow_to_sample.py deleted file mode 100644 index 97b11977d805..000000000000 --- a/lib/galaxy/model/migrate/versions/0071_add_history_and_workflow_to_sample.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -Migration script to add 'workflow' and 'history' columns for a sample. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - Sample_table = Table("sample", metadata, autoload=True) - c1 = Column("workflow", JSONType, nullable=True) - add_column(c1, Sample_table, metadata) - - c2 = Column("history_id", Integer, ForeignKey("history.id"), nullable=True) - add_column(c2, Sample_table, metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - Sample_table = Table("sample", metadata, autoload=True) - drop_column("workflow", Sample_table) - drop_column("history_id", Sample_table) diff --git a/lib/galaxy/model/migrate/versions/0072_add_pid_and_socket_columns_to_transfer_job_table.py b/lib/galaxy/model/migrate/versions/0072_add_pid_and_socket_columns_to_transfer_job_table.py deleted file mode 100644 index d7b5c7904f96..000000000000 --- a/lib/galaxy/model/migrate/versions/0072_add_pid_and_socket_columns_to_transfer_job_table.py +++ /dev/null @@ -1,42 +0,0 @@ -""" -Migration script to add 'pid' and 'socket' columns to the transfer_job table. 
-""" - -import logging - -from sqlalchemy import ( - Column, - Integer, - MetaData, - Table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - try: - TransferJob_table = Table("transfer_job", metadata, autoload=True) - c = Column("pid", Integer) - c.create(TransferJob_table) - assert c is TransferJob_table.c.pid - c = Column("socket", Integer) - c.create(TransferJob_table) - assert c is TransferJob_table.c.socket - except Exception: - log.exception("Adding columns to transfer_job table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - try: - TransferJob_table = Table("transfer_job", metadata, autoload=True) - TransferJob_table.c.pid.drop() - TransferJob_table.c.socket.drop() - except Exception: - log.exception("Dropping columns from transfer_job table failed.") diff --git a/lib/galaxy/model/migrate/versions/0073_add_ldda_to_implicit_conversion_table.py b/lib/galaxy/model/migrate/versions/0073_add_ldda_to_implicit_conversion_table.py deleted file mode 100644 index ebc8f3f50c58..000000000000 --- a/lib/galaxy/model/migrate/versions/0073_add_ldda_to_implicit_conversion_table.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -Migration script to add 'ldda_parent_id' column to the implicitly_converted_dataset_association table. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - c = Column( - "ldda_parent_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True - ) - add_column( - c, - "implicitly_converted_dataset_association", - metadata, - index_name="ix_implicitly_converted_dataset_assoc_ldda_parent_id", - ) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("ldda_parent_id", "implicitly_converted_dataset_association", metadata) diff --git a/lib/galaxy/model/migrate/versions/0074_add_purged_column_to_library_dataset_table.py b/lib/galaxy/model/migrate/versions/0074_add_purged_column_to_library_dataset_table.py deleted file mode 100644 index 9284f3a179f0..000000000000 --- a/lib/galaxy/model/migrate/versions/0074_add_purged_column_to_library_dataset_table.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Migration script to add 'purged' column to the library_dataset table. -""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, - engine_false, - engine_true, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - c = Column("purged", Boolean, index=True, default=False) - add_column(c, "library_dataset", metadata, index_name="ix_library_dataset_purged") - # Update the purged flag to the default False - cmd = f"UPDATE library_dataset SET purged = {engine_false(migrate_engine)};" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("Setting default data for library_dataset.purged column failed.") - - # Update the purged flag for those LibraryDatasets whose purged flag should be True. 
This happens - # when the LibraryDataset has no active LibraryDatasetDatasetAssociations. - cmd = f"SELECT * FROM library_dataset WHERE deleted = {engine_true(migrate_engine)};" - deleted_lds = migrate_engine.execute(cmd).fetchall() - for row in deleted_lds: - cmd = ( - "SELECT * FROM library_dataset_dataset_association WHERE library_dataset_id = %d AND library_dataset_dataset_association.deleted = %s;" - % (int(row.id), engine_false(migrate_engine)) - ) - active_lddas = migrate_engine.execute(cmd).fetchall() - if not active_lddas: - print("Updating purged column to True for LibraryDataset id : ", int(row.id)) - cmd = "UPDATE library_dataset SET purged = %s WHERE id = %d;" % (engine_true(migrate_engine), int(row.id)) - migrate_engine.execute(cmd) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # SQLAlchemy Migrate has a bug when dropping a boolean column in SQLite - if migrate_engine.name != "sqlite": - drop_column("purged", "library_dataset", metadata) diff --git a/lib/galaxy/model/migrate/versions/0075_add_subindex_column_to_run_table.py b/lib/galaxy/model/migrate/versions/0075_add_subindex_column_to_run_table.py deleted file mode 100644 index 536682a913d7..000000000000 --- a/lib/galaxy/model/migrate/versions/0075_add_subindex_column_to_run_table.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -Migration script to add a 'subindex' column to the run table. -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, -) - -from galaxy.model.custom_types import TrimmedString - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - try: - Run_table = Table("run", metadata, autoload=True) - c = Column("subindex", TrimmedString(255), index=True) - c.create(Run_table, index_name="ix_run_subindex") - assert c is Run_table.c.subindex - except Exception: - log.exception("Adding the subindex column to the run table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - try: - Run_table = Table("run", metadata, autoload=True) - Run_table.c.subindex.drop() - except Exception: - log.exception("Dropping the subindex column from run table failed.") diff --git a/lib/galaxy/model/migrate/versions/0076_fix_form_values_data_corruption.py b/lib/galaxy/model/migrate/versions/0076_fix_form_values_data_corruption.py deleted file mode 100644 index f0d439230419..000000000000 --- a/lib/galaxy/model/migrate/versions/0076_fix_form_values_data_corruption.py +++ /dev/null @@ -1,98 +0,0 @@ -""" -This migration script fixes the data corruption caused in the form_values -table (content json field) by migrate script 65. -""" - -import logging -from json import ( - dumps, - loads, -) - -from sqlalchemy import MetaData - -from galaxy.model.custom_types import _sniffnfix_pg9_hex - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - cmd = ( - "SELECT form_values.id as id, form_values.content as field_values, form_definition.fields as fdfields " - + " FROM form_definition, form_values " - + " WHERE form_values.form_definition_id=form_definition.id " - + " ORDER BY form_values.id" - ) - result = migrate_engine.execute(cmd) - corrupted_rows = 0 - for row in result: - # first check if loading the dict from the json succeeds - # if that fails, it means that the content field is corrupted. 
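
Several of these scripts (0057, 0074, 0078) skip their SQLite downgrade because SQLAlchemy Migrate could not drop a boolean column there. Alembic's batch mode removes the special case by recreating the table behind the scenes; a sketch of what 0074's downgrade becomes:

    from alembic import op

    def downgrade():
        # On SQLite, batch mode copies the table without the column and
        # swaps it in; on other backends it is a plain DROP COLUMN.
        with op.batch_alter_table("library_dataset") as batch_op:
            batch_op.drop_column("purged")
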
- try: - field_values_dict = loads(_sniffnfix_pg9_hex(str(row["field_values"]))) - except Exception: - corrupted_rows = corrupted_rows + 1 - # content field is corrupted - fields_list = loads(_sniffnfix_pg9_hex(str(row["fdfields"]))) - field_values_str = _sniffnfix_pg9_hex(str(row["field_values"])) - try: - # Encoding errors? Just to be safe. - print(f"Attempting to fix row {row['id']}") - print(f"Prior to replacement: {field_values_str}") - except Exception: - pass - field_values_dict = {} - # look for each field name in the values and extract its value (string) - for index in range(len(fields_list)): - field = fields_list[index] - field_name_key = f"\"{field['name']}\": \"" - field_index = field_values_str.find(field_name_key) - if field_index == -1: - # if the field name is not present the field values dict then - # inform the admin that this form values cannot be fixed - print( - "The 'content' field of row 'id' %i does not have the field '%s' in the 'form_values' table and could not be fixed by this migration script." - % (int(field["id"]), field["name"]) - ) - else: - # check if this is the last field - if index == len(fields_list) - 1: - # since this is the last field, the value string lies between the - # field name and the '"}' string at the end, hence len(field_values_str) - 2 - value = field_values_str[field_index + len(field_name_key) : len(field_values_str) - 2] - else: - # if this is not the last field then the value string lies between - # this field name and the next field name - next_field = fields_list[index + 1] - next_field_index = field_values_str.find(f"\", \"{next_field['name']}\": \"") - value = field_values_str[field_index + len(field_name_key) : next_field_index] - # clean up the value string, escape the required quoutes and newline characters - value = ( - value.replace("'", "''") - .replace('"', '\\\\"') - .replace("\r", "\\\\r") - .replace("\n", "\\\\n") - .replace("\t", "\\\\t") - ) - # add to the new values dict - field_values_dict[field["name"]] = value - # update the db - json_values = dumps(field_values_dict) - cmd = "UPDATE form_values SET content='%s' WHERE id=%i" % (json_values, int(row["id"])) - migrate_engine.execute(cmd) - try: - print(f"Post replacement: {json_values}") - except Exception: - pass - if corrupted_rows: - print("Fixed %i corrupted rows." % corrupted_rows) - else: - print("No corrupted rows found.") - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0077_create_tool_tag_association_table.py b/lib/galaxy/model/migrate/versions/0077_create_tool_tag_association_table.py deleted file mode 100644 index 58763437c8c3..000000000000 --- a/lib/galaxy/model/migrate/versions/0077_create_tool_tag_association_table.py +++ /dev/null @@ -1,55 +0,0 @@ -""" -Migration script to create table for storing tool tag associations. 
-""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import TrimmedString - -log = logging.getLogger(__name__) -metadata = MetaData() - -# Table to add - -ToolTagAssociation_table = Table( - "tool_tag_association", - metadata, - Column("id", Integer, primary_key=True), - Column("tool_id", TrimmedString(255), index=True), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("user_tname", TrimmedString(255), index=True), - Column("value", TrimmedString(255), index=True), - Column("user_value", TrimmedString(255), index=True), -) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - # Create tool_tag_association table - try: - ToolTagAssociation_table.create() - except Exception: - log.exception("Creating tool_tag_association table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop tool_tag_association table - try: - ToolTagAssociation_table.drop() - except Exception: - log.exception("Dropping tool_tag_association table failed.") diff --git a/lib/galaxy/model/migrate/versions/0078_add_columns_for_disk_usage_accounting.py b/lib/galaxy/model/migrate/versions/0078_add_columns_for_disk_usage_accounting.py deleted file mode 100644 index cab4ea2bf6f4..000000000000 --- a/lib/galaxy/model/migrate/versions/0078_add_columns_for_disk_usage_accounting.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Migration script to add 'total_size' column to the dataset table, 'purged' -column to the HDA table, and 'disk_usage' column to the User and GalaxySession -tables. -""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, - Numeric, - Table, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - c = Column("total_size", Numeric(15, 0)) - add_column(c, "dataset", metadata) - - HistoryDatasetAssociation_table = Table("history_dataset_association", metadata, autoload=True) - c = Column("purged", Boolean, index=True, default=False) - add_column(c, HistoryDatasetAssociation_table, metadata, index_name="ix_history_dataset_association_purged") - try: - migrate_engine.execute(HistoryDatasetAssociation_table.update().values(purged=False)) - except Exception: - log.exception("Updating column 'purged' of table 'history_dataset_association' failed.") - - c = Column("disk_usage", Numeric(15, 0), index=True) - add_column(c, "galaxy_user", metadata, index_name="ix_galaxy_user_disk_usage") - - c = Column("disk_usage", Numeric(15, 0), index=True) - add_column(c, "galaxy_session", metadata, index_name="ix_galaxy_session_disk_usage") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("disk_usage", "galaxy_session", metadata) - drop_column("disk_usage", "galaxy_user", metadata) - - # SQLAlchemy Migrate has a bug when dropping a boolean column in SQLite - if migrate_engine.name != "sqlite": - drop_column("purged", "history_dataset_association", metadata) - - drop_column("total_size", "dataset", metadata) diff --git a/lib/galaxy/model/migrate/versions/0079_input_library_to_job_table.py b/lib/galaxy/model/migrate/versions/0079_input_library_to_job_table.py deleted file 
mode 100644 index 5ddfb97d6130..000000000000 --- a/lib/galaxy/model/migrate/versions/0079_input_library_to_job_table.py +++ /dev/null @@ -1,50 +0,0 @@ -""" -Migration script to add the job_to_input_library_dataset table. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - String, - Table, -) - -log = logging.getLogger(__name__) - -metadata = MetaData() - -JobToInputLibraryDatasetAssociation_table = Table( - "job_to_input_library_dataset", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True), - Column("name", String(255)), -) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - # Create the job_to_input_library_dataset table - try: - JobToInputLibraryDatasetAssociation_table.create() - except Exception: - log.exception("Creating job_to_input_library_dataset table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop the job_to_input_library_dataset table - try: - JobToInputLibraryDatasetAssociation_table.drop() - except Exception: - log.exception("Dropping job_to_input_library_dataset table failed.") diff --git a/lib/galaxy/model/migrate/versions/0080_quota_tables.py b/lib/galaxy/model/migrate/versions/0080_quota_tables.py deleted file mode 100644 index 0973ea2ea0cc..000000000000 --- a/lib/galaxy/model/migrate/versions/0080_quota_tables.py +++ /dev/null @@ -1,127 +0,0 @@ -""" -Migration script to create tables for disk quotas. -""" - -import datetime -import logging - -from sqlalchemy import ( - BigInteger, - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - String, - Table, - TEXT, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -# Tables to add - -Quota_table = Table( - "quota", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("name", String(255), index=True, unique=True), - Column("description", TEXT), - Column("bytes", BigInteger), - Column("operation", String(8)), - Column("deleted", Boolean, index=True, default=False), -) - -UserQuotaAssociation_table = Table( - "user_quota_association", - metadata, - Column("id", Integer, primary_key=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("quota_id", Integer, ForeignKey("quota.id"), index=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), -) - -GroupQuotaAssociation_table = Table( - "group_quota_association", - metadata, - Column("id", Integer, primary_key=True), - Column("group_id", Integer, ForeignKey("galaxy_group.id"), index=True), - Column("quota_id", Integer, ForeignKey("quota.id"), index=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), -) - -DefaultQuotaAssociation_table = Table( - "default_quota_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("type", String(32), index=True, unique=True), - Column("quota_id", Integer, ForeignKey("quota.id"), index=True), -) - - -def upgrade(migrate_engine): - metadata.bind = 
migrate_engine - print(__doc__) - metadata.reflect() - - # Create quota table - try: - Quota_table.create() - except Exception: - log.exception("Creating quota table failed.") - - # Create user_quota_association table - try: - UserQuotaAssociation_table.create() - except Exception: - log.exception("Creating user_quota_association table failed.") - - # Create group_quota_association table - try: - GroupQuotaAssociation_table.create() - except Exception: - log.exception("Creating group_quota_association table failed.") - - # Create default_quota_association table - try: - DefaultQuotaAssociation_table.create() - except Exception: - log.exception("Creating default_quota_association table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop default_quota_association table - try: - DefaultQuotaAssociation_table.drop() - except Exception: - log.exception("Dropping default_quota_association table failed.") - - # Drop group_quota_association table - try: - GroupQuotaAssociation_table.drop() - except Exception: - log.exception("Dropping group_quota_association table failed.") - - # Drop user_quota_association table - try: - UserQuotaAssociation_table.drop() - except Exception: - log.exception("Dropping user_quota_association table failed.") - - # Drop quota table - try: - Quota_table.drop() - except Exception: - log.exception("Dropping quota table failed.") diff --git a/lib/galaxy/model/migrate/versions/0081_add_tool_version_to_hda_ldda.py b/lib/galaxy/model/migrate/versions/0081_add_tool_version_to_hda_ldda.py deleted file mode 100644 index 040f7e629673..000000000000 --- a/lib/galaxy/model/migrate/versions/0081_add_tool_version_to_hda_ldda.py +++ /dev/null @@ -1,47 +0,0 @@ -""" -Migration script to add a 'tool_version' column to the hda/ldda tables. -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, - TEXT, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - try: - hda_table = Table("history_dataset_association", metadata, autoload=True) - c = Column("tool_version", TEXT) - c.create(hda_table) - assert c is hda_table.c.tool_version - - ldda_table = Table("library_dataset_dataset_association", metadata, autoload=True) - c = Column("tool_version", TEXT) - c.create(ldda_table) - assert c is ldda_table.c.tool_version - - except Exception: - log.exception("Adding the tool_version column to the hda/ldda tables failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - try: - hda_table = Table("history_dataset_association", metadata, autoload=True) - hda_table.c.tool_version.drop() - - ldda_table = Table("library_dataset_dataset_association", metadata, autoload=True) - ldda_table.c.tool_version.drop() - except Exception: - log.exception("Dropping the tool_version column from hda/ldda table failed.") diff --git a/lib/galaxy/model/migrate/versions/0082_add_tool_shed_repository_table.py b/lib/galaxy/model/migrate/versions/0082_add_tool_shed_repository_table.py deleted file mode 100644 index 52acd68fd130..000000000000 --- a/lib/galaxy/model/migrate/versions/0082_add_tool_shed_repository_table.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Migration script to add the tool_shed_repository table. 
-""" - -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -# New table to store information about cloned tool shed repositories. -ToolShedRepository_table = Table( - "tool_shed_repository", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("tool_shed", TrimmedString(255), index=True), - Column("name", TrimmedString(255), index=True), - Column("description", TEXT), - Column("owner", TrimmedString(255), index=True), - Column("changeset_revision", TrimmedString(255), index=True), - Column("deleted", Boolean, index=True, default=False), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(ToolShedRepository_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(ToolShedRepository_table) diff --git a/lib/galaxy/model/migrate/versions/0083_add_prepare_files_to_task.py b/lib/galaxy/model/migrate/versions/0083_add_prepare_files_to_task.py deleted file mode 100644 index d37b17c68296..000000000000 --- a/lib/galaxy/model/migrate/versions/0083_add_prepare_files_to_task.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -Migration script to add 'prepare_input_files_cmd' column to the task table and to rename a column. -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - String, - Table, - TEXT, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - task_table = Table("task", metadata, autoload=True) - c = Column("prepare_input_files_cmd", TEXT, nullable=True) - add_column(c, task_table, metadata) - - c = Column("working_directory", String(1024), nullable=True) - add_column(c, task_table, metadata) - - # remove the 'part_file' column - nobody used tasks before this, so no data needs to be migrated - drop_column("part_file", task_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - task_table = Table("task", metadata, autoload=True) - c = Column("part_file", String(1024), nullable=True) - add_column(c, task_table, metadata) - - drop_column("working_directory", task_table) - drop_column("prepare_input_files_cmd", task_table) diff --git a/lib/galaxy/model/migrate/versions/0084_add_ldda_id_to_implicit_conversion_table.py b/lib/galaxy/model/migrate/versions/0084_add_ldda_id_to_implicit_conversion_table.py deleted file mode 100644 index 770e715c7bb7..000000000000 --- a/lib/galaxy/model/migrate/versions/0084_add_ldda_id_to_implicit_conversion_table.py +++ /dev/null @@ -1,38 +0,0 @@ -""" -Migration script to add 'ldda_id' column to the implicitly_converted_dataset_association table. 
-""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - c = Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True) - add_column( - c, "implicitly_converted_dataset_association", metadata, index_name="ix_implicitly_converted_ds_assoc_ldda_id" - ) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("ldda_id", "implicitly_converted_dataset_association", metadata) diff --git a/lib/galaxy/model/migrate/versions/0085_add_task_info.py b/lib/galaxy/model/migrate/versions/0085_add_task_info.py deleted file mode 100644 index 289188cb6e43..000000000000 --- a/lib/galaxy/model/migrate/versions/0085_add_task_info.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -Migration script to add 'info' column to the task table. -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, -) - -from galaxy.model.custom_types import TrimmedString - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - try: - task_table = Table("task", metadata, autoload=True) - c = Column("info", TrimmedString(255), nullable=True) - c.create(task_table) - assert c is task_table.c.info - except Exception: - log.exception("Adding info column to task table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - try: - task_table = Table("task", metadata, autoload=True) - task_table.c.info.drop() - except Exception: - log.exception("Dropping info column from task table failed.") diff --git a/lib/galaxy/model/migrate/versions/0086_add_tool_shed_repository_table_columns.py b/lib/galaxy/model/migrate/versions/0086_add_tool_shed_repository_table_columns.py deleted file mode 100644 index 168e21d9d424..000000000000 --- a/lib/galaxy/model/migrate/versions/0086_add_tool_shed_repository_table_columns.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Migration script to add the metadata, update_available and includes_datatypes columns to the tool_shed_repository table. 
-""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, - Table, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, - engine_false, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - ToolShedRepository_table = Table("tool_shed_repository", metadata, autoload=True) - c = Column("metadata", JSONType, nullable=True) - add_column(c, ToolShedRepository_table, metadata) - c = Column("includes_datatypes", Boolean, index=True, default=False) - add_column(c, ToolShedRepository_table, metadata, index_name="ix_tool_shed_repository_includes_datatypes") - try: - migrate_engine.execute(f"UPDATE tool_shed_repository SET includes_datatypes={engine_false(migrate_engine)}") - except Exception: - log.exception("Updating column 'includes_datatypes' of table 'tool_shed_repository' failed.") - c = Column("update_available", Boolean, default=False) - add_column(c, ToolShedRepository_table, metadata) - try: - migrate_engine.execute(f"UPDATE tool_shed_repository SET update_available={engine_false(migrate_engine)}") - except Exception: - log.exception("Updating column 'update_available' of table 'tool_shed_repository' failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - ToolShedRepository_table = Table("tool_shed_repository", metadata, autoload=True) - drop_column("metadata", ToolShedRepository_table) - # SQLAlchemy Migrate has a bug when dropping a boolean column in SQLite - if migrate_engine.name != "sqlite": - drop_column("includes_datatypes", ToolShedRepository_table) - drop_column("update_available", ToolShedRepository_table) diff --git a/lib/galaxy/model/migrate/versions/0087_tool_id_guid_map_table.py b/lib/galaxy/model/migrate/versions/0087_tool_id_guid_map_table.py deleted file mode 100644 index 056faf991f4e..000000000000 --- a/lib/galaxy/model/migrate/versions/0087_tool_id_guid_map_table.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -Migration script to create the tool_id_guid_map table. 
-""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - Index, - Integer, - MetaData, - String, - Table, - TEXT, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -ToolIdGuidMap_table = Table( - "tool_id_guid_map", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("tool_id", String(255)), - Column("tool_version", TEXT), - Column("tool_shed", TrimmedString(255)), - Column("repository_owner", TrimmedString(255)), - Column("repository_name", TrimmedString(255)), - Column("guid", TEXT), - Index("ix_tool_id_guid_map_guid", "guid", unique=True, mysql_length=200), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(ToolIdGuidMap_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(ToolIdGuidMap_table) diff --git a/lib/galaxy/model/migrate/versions/0088_add_installed_changeset_revison_column.py b/lib/galaxy/model/migrate/versions/0088_add_installed_changeset_revison_column.py deleted file mode 100644 index 499e08d1ab04..000000000000 --- a/lib/galaxy/model/migrate/versions/0088_add_installed_changeset_revison_column.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -Migration script to add the installed_changeset_revision column to the tool_shed_repository table. -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - col = Column("installed_changeset_revision", TrimmedString(255)) - add_column(col, "tool_shed_repository", metadata) - # Update each row by setting the value of installed_changeset_revison to be the value of changeset_revision. - # This will be problematic if the value of changeset_revision was updated to something other than the value - # that it was when the repository was installed (because the install path determined in real time will attempt to - # find the repository using the updated changeset_revison instead of the required installed_changeset_revision), - # but at the time this script was written, this scenario is extremely unlikely. - cmd = ( - "SELECT id AS id, " - + "installed_changeset_revision AS installed_changeset_revision, " - + "changeset_revision AS changeset_revision " - + "FROM tool_shed_repository;" - ) - tool_shed_repositories = migrate_engine.execute(cmd).fetchall() - update_count = 0 - for row in tool_shed_repositories: - cmd = ( - "UPDATE tool_shed_repository " - + f"SET installed_changeset_revision = '{row.changeset_revision}' " - + f"WHERE changeset_revision = '{row.changeset_revision}';" - ) - migrate_engine.execute(cmd) - update_count += 1 - print( - "Updated the installed_changeset_revision column for ", update_count, " rows in the tool_shed_repository table." 
- ) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("installed_changeset_revision", "tool_shed_repository", metadata) diff --git a/lib/galaxy/model/migrate/versions/0089_add_object_store_id_columns.py b/lib/galaxy/model/migrate/versions/0089_add_object_store_id_columns.py deleted file mode 100644 index 4b276f2215bb..000000000000 --- a/lib/galaxy/model/migrate/versions/0089_add_object_store_id_columns.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -Migration script to add 'object_store_id' column to various tables -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, -) - -from galaxy.model.custom_types import TrimmedString - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for t_name in ("dataset", "job", "metadata_file"): - t = Table(t_name, metadata, autoload=True) - c = Column("object_store_id", TrimmedString(255), index=True) - try: - c.create(t, index_name=f"ix_{t_name}_object_store_id") - assert c is t.c.object_store_id - except Exception: - log.exception("Adding object_store_id column to %s table failed.", t_name) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - for t_name in ("dataset", "job", "metadata_file"): - t = Table(t_name, metadata, autoload=True) - try: - t.c.object_store_id.drop() - except Exception: - log.exception("Dropping object_store_id column from %s table failed.", t_name) diff --git a/lib/galaxy/model/migrate/versions/0090_add_tool_shed_repository_table_columns.py b/lib/galaxy/model/migrate/versions/0090_add_tool_shed_repository_table_columns.py deleted file mode 100644 index b50fcd2fa56a..000000000000 --- a/lib/galaxy/model/migrate/versions/0090_add_tool_shed_repository_table_columns.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -Migration script to add the uninstalled and dist_to_shed columns to the tool_shed_repository table. 
-""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, - engine_false, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - ToolShedRepository_table = Table("tool_shed_repository", metadata, autoload=True) - c = Column("uninstalled", Boolean, default=False) - add_column(c, ToolShedRepository_table, metadata) - try: - migrate_engine.execute(f"UPDATE tool_shed_repository SET uninstalled={engine_false(migrate_engine)}") - except Exception: - log.exception("Updating column 'uninstalled' of table 'tool_shed_repository' failed.") - c = Column("dist_to_shed", Boolean, default=False) - add_column(c, ToolShedRepository_table, metadata) - try: - migrate_engine.execute(f"UPDATE tool_shed_repository SET dist_to_shed={engine_false(migrate_engine)}") - except Exception: - log.exception("Updating column 'dist_to_shed' of table 'tool_shed_repository' failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - ToolShedRepository_table = Table("tool_shed_repository", metadata, autoload=True) - # SQLAlchemy Migrate has a bug when dropping a boolean column in SQLite - if migrate_engine.name != "sqlite": - drop_column("uninstalled", ToolShedRepository_table) - drop_column("dist_to_shed", ToolShedRepository_table) diff --git a/lib/galaxy/model/migrate/versions/0091_add_tool_version_tables.py b/lib/galaxy/model/migrate/versions/0091_add_tool_version_tables.py deleted file mode 100644 index b0c2626dbc2b..000000000000 --- a/lib/galaxy/model/migrate/versions/0091_add_tool_version_tables.py +++ /dev/null @@ -1,128 +0,0 @@ -""" -Migration script to create the tool_version and tool_version_association tables and drop the tool_id_guid_map table. -""" - -import datetime -import logging -from json import loads - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Index, - Integer, - MetaData, - String, - Table, - TEXT, -) - -from galaxy.model.custom_types import ( - _sniffnfix_pg9_hex, - TrimmedString, -) -from galaxy.model.migrate.versions.util import ( - localtimestamp, - nextval, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - - -ToolVersion_table = Table( - "tool_version", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("tool_id", String(255)), - Column("tool_shed_repository_id", Integer, ForeignKey("tool_shed_repository.id"), index=True, nullable=True), -) - -ToolVersionAssociation_table = Table( - "tool_version_association", - metadata, - Column("id", Integer, primary_key=True), - Column("tool_id", Integer, ForeignKey("tool_version.id"), index=True, nullable=False), - Column("parent_id", Integer, ForeignKey("tool_version.id"), index=True, nullable=False), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Create the tables. - try: - ToolVersion_table.create() - except Exception: - log.exception("Creating tool_version table failed.") - try: - ToolVersionAssociation_table.create() - except Exception: - log.exception("Creating tool_version_association table failed.") - # Populate the tool table with tools included in installed tool shed repositories. 
- cmd = "SELECT id, metadata FROM tool_shed_repository" - result = migrate_engine.execute(cmd) - count = 0 - for row in result: - if row[1]: - tool_shed_repository_id = row[0] - repository_metadata = loads(_sniffnfix_pg9_hex(str(row[1]))) - # Create a new row in the tool table for each tool included in repository. We will NOT - # handle tool_version_associaions because we do not have the information we need to do so. - tools = repository_metadata.get("tools", []) - for tool_dict in tools: - cmd = "INSERT INTO tool_version VALUES (%s, %s, %s, '%s', %s)" % ( - nextval(migrate_engine, "tool_version"), - localtimestamp(migrate_engine), - localtimestamp(migrate_engine), - tool_dict["guid"], - tool_shed_repository_id, - ) - migrate_engine.execute(cmd) - count += 1 - print("Added %d rows to the new tool_version table." % count) - # Drop the tool_id_guid_map table since the 2 new tables render it unnecessary. - ToolIdGuidMap_table = Table("tool_id_guid_map", metadata, autoload=True) - try: - ToolIdGuidMap_table.drop() - except Exception: - log.exception("Dropping tool_id_guid_map table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - - ToolIdGuidMap_table = Table( - "tool_id_guid_map", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("tool_id", String(255)), - Column("tool_version", TEXT), - Column("tool_shed", TrimmedString(255)), - Column("repository_owner", TrimmedString(255)), - Column("repository_name", TrimmedString(255)), - Column("guid", TEXT), - Index("ix_tool_id_guid_map_guid", "guid", unique=True, mysql_length=200), - ) - - metadata.reflect() - try: - ToolVersionAssociation_table.drop() - except Exception: - log.exception("Dropping tool_version_association table failed.") - try: - ToolVersion_table.drop() - except Exception: - log.exception("Dropping tool_version table failed.") - try: - ToolIdGuidMap_table.create() - except Exception: - log.exception("Creating tool_id_guid_map table failed.") diff --git a/lib/galaxy/model/migrate/versions/0092_add_migrate_tools_table.py b/lib/galaxy/model/migrate/versions/0092_add_migrate_tools_table.py deleted file mode 100644 index 9fdb919349a9..000000000000 --- a/lib/galaxy/model/migrate/versions/0092_add_migrate_tools_table.py +++ /dev/null @@ -1,50 +0,0 @@ -""" -Migration script to create the migrate_tools table. 
-""" - -import logging - -from sqlalchemy import ( - Column, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -MigrateTools_table = Table( - "migrate_tools", - metadata, - Column("repository_id", TrimmedString(255)), - Column("repository_path", TEXT), - Column("version", Integer), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(MigrateTools_table) - try: - cmd = "INSERT INTO migrate_tools VALUES ('GalaxyTools', 'lib/galaxy/tool_shed/migrate', %d)" % 1 - migrate_engine.execute(cmd) - except Exception: - log.exception("Inserting into table 'migrate_tools' failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(MigrateTools_table) diff --git a/lib/galaxy/model/migrate/versions/0093_add_job_params_col.py b/lib/galaxy/model/migrate/versions/0093_add_job_params_col.py deleted file mode 100644 index ffd2936845d8..000000000000 --- a/lib/galaxy/model/migrate/versions/0093_add_job_params_col.py +++ /dev/null @@ -1,48 +0,0 @@ -""" -Migration script to add a 'params' column to the 'job' table. -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, -) - -# Need our custom types, but don't import anything else from model -from galaxy.model.custom_types import TrimmedString - -log = logging.getLogger(__name__) -metadata = MetaData() - -# Column to add. -params_col = Column("params", TrimmedString(255), index=True) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - # Add column to Job table. - try: - Job_table = Table("job", metadata, autoload=True) - params_col.create(Job_table, index_name="ix_job_params") - assert params_col is Job_table.c.params - - except Exception: - log.exception("Adding column 'params' to job table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop column from Job table. - try: - Job_table = Table("job", metadata, autoload=True) - params_col = Job_table.c.params - params_col.drop() - except Exception: - log.exception("Dropping column 'params' from job table failed.") diff --git a/lib/galaxy/model/migrate/versions/0094_add_job_handler_col.py b/lib/galaxy/model/migrate/versions/0094_add_job_handler_col.py deleted file mode 100644 index 1654d6789016..000000000000 --- a/lib/galaxy/model/migrate/versions/0094_add_job_handler_col.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -Migration script to add a 'handler' column to the 'job' table. -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -# Column to add. 
-handler_col = Column("handler", TrimmedString(255), index=True) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - add_column(handler_col, "job", metadata, index_name="ix_job_handler") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("handler", "job", metadata) diff --git a/lib/galaxy/model/migrate/versions/0095_hda_subsets.py b/lib/galaxy/model/migrate/versions/0095_hda_subsets.py deleted file mode 100644 index f6a4c6a4395e..000000000000 --- a/lib/galaxy/model/migrate/versions/0095_hda_subsets.py +++ /dev/null @@ -1,61 +0,0 @@ -""" -Migration script to create table for tracking history_dataset_association subsets. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Index, - Integer, - MetaData, - Table, - Unicode, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -# Table to add. - -HistoryDatasetAssociationSubset_table = Table( - "history_dataset_association_subset", - metadata, - Column("id", Integer, primary_key=True), - Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id")), - Column("history_dataset_association_subset_id", Integer, ForeignKey("history_dataset_association.id")), - Column("location", Unicode(255), index=True), -) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - # Create history_dataset_association_subset. - try: - HistoryDatasetAssociationSubset_table.create() - except Exception: - log.exception("Creating history_dataset_association_subset table failed.") - - # Manually create indexes because they are too long for MySQL databases. - i1 = Index("ix_hda_id", HistoryDatasetAssociationSubset_table.c.history_dataset_association_id) - i2 = Index("ix_hda_subset_id", HistoryDatasetAssociationSubset_table.c.history_dataset_association_subset_id) - try: - i1.create() - i2.create() - except Exception: - log.exception("Adding indices to table 'history_dataset_association_subset' table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop history_dataset_association_subset table. - try: - HistoryDatasetAssociationSubset_table.drop() - except Exception: - log.exception("Dropping history_dataset_association_subset table failed.") diff --git a/lib/galaxy/model/migrate/versions/0096_openid_provider.py b/lib/galaxy/model/migrate/versions/0096_openid_provider.py deleted file mode 100644 index 2a19e269073d..000000000000 --- a/lib/galaxy/model/migrate/versions/0096_openid_provider.py +++ /dev/null @@ -1,48 +0,0 @@ -""" -Migration script to add column to openid table for provider. 
-Remove any OpenID entries with nonunique GenomeSpace Identifier -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, -) - -from galaxy.model.custom_types import TrimmedString - -log = logging.getLogger(__name__) -BAD_IDENTIFIER = "https://identity.genomespace.org/identityServer/xrd.jsp" -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - try: - OpenID_table = Table("galaxy_user_openid", metadata, autoload=True) - c = Column("provider", TrimmedString(255)) - c.create(OpenID_table) - assert c is OpenID_table.c.provider - except Exception: - log.exception("Adding provider column to galaxy_user_openid table failed.") - - try: - cmd = f"DELETE FROM galaxy_user_openid WHERE openid='{BAD_IDENTIFIER}'" - migrate_engine.execute(cmd) - except Exception: - log.exception("Deleting bad Identifiers from galaxy_user_openid failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - try: - OpenID_table = Table("galaxy_user_openid", metadata, autoload=True) - OpenID_table.c.provider.drop() - except Exception: - log.exception("Dropping provider column from galaxy_user_openid table failed.") diff --git a/lib/galaxy/model/migrate/versions/0097_add_ctx_rev_column.py b/lib/galaxy/model/migrate/versions/0097_add_ctx_rev_column.py deleted file mode 100644 index 81ff20f8f5b3..000000000000 --- a/lib/galaxy/model/migrate/versions/0097_add_ctx_rev_column.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Migration script to add the ctx_rev column to the tool_shed_repository table. -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - col = Column("ctx_rev", TrimmedString(10)) - add_column(col, "tool_shed_repository", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("ctx_rev", "tool_shed_repository", metadata) diff --git a/lib/galaxy/model/migrate/versions/0098_genome_index_tool_data_table.py b/lib/galaxy/model/migrate/versions/0098_genome_index_tool_data_table.py deleted file mode 100644 index 5bae2518624c..000000000000 --- a/lib/galaxy/model/migrate/versions/0098_genome_index_tool_data_table.py +++ /dev/null @@ -1,55 +0,0 @@ -""" -Migration script to create the genome_index_tool_data table. 
-""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - String, - Table, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -GenomeIndexToolData_table = Table( - "genome_index_tool_data", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True), - Column("deferred_job_id", Integer, ForeignKey("deferred_job.id"), index=True), - Column("transfer_job_id", Integer, ForeignKey("transfer_job.id"), index=True), - Column("fasta_path", String(255)), - Column("created_time", DateTime, default=now), - Column("modified_time", DateTime, default=now, onupdate=now), - Column("indexer", String(64)), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(GenomeIndexToolData_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(GenomeIndexToolData_table) diff --git a/lib/galaxy/model/migrate/versions/0099_add_tool_dependency_table.py b/lib/galaxy/model/migrate/versions/0099_add_tool_dependency_table.py deleted file mode 100644 index b8b6ef35c61d..000000000000 --- a/lib/galaxy/model/migrate/versions/0099_add_tool_dependency_table.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Migration script to add the tool_dependency table. -""" - -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -# New table to store information about cloned tool shed repositories. -ToolDependency_table = Table( - "tool_dependency", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("tool_shed_repository_id", Integer, ForeignKey("tool_shed_repository.id"), index=True, nullable=False), - Column("installed_changeset_revision", TrimmedString(255)), - Column("name", TrimmedString(255)), - Column("version", TrimmedString(40)), - Column("type", TrimmedString(40)), - Column("uninstalled", Boolean, default=False), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(ToolDependency_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(ToolDependency_table) diff --git a/lib/galaxy/model/migrate/versions/0100_alter_tool_dependency_table_version_column.py b/lib/galaxy/model/migrate/versions/0100_alter_tool_dependency_table_version_column.py deleted file mode 100644 index ea0a274b430a..000000000000 --- a/lib/galaxy/model/migrate/versions/0100_alter_tool_dependency_table_version_column.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -Migration script to alter the type of the tool_dependency.version column from TrimmedString(40) to Text. 
-""" - -import logging - -from sqlalchemy import ( - MetaData, - Table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - Table("tool_dependency", metadata, autoload=True) - # Change the tool_dependency table's version column from TrimmedString to Text. - if migrate_engine.name in ["postgres", "postgresql"]: - cmd = "ALTER TABLE tool_dependency ALTER COLUMN version TYPE Text;" - elif migrate_engine.name == "mysql": - cmd = "ALTER TABLE tool_dependency MODIFY COLUMN version Text;" - else: - # We don't have to do anything for sqlite tables. From the sqlite documentation at http://sqlite.org/datatype3.html: - # 1.0 Storage Classes and Datatypes - # Each value stored in an SQLite database (or manipulated by the database engine) has one of the following storage classes: - # NULL. The value is a NULL value. - # INTEGER. The value is a signed integer, stored in 1, 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value. - # REAL. The value is a floating point value, stored as an 8-byte IEEE floating point number. - # TEXT. The value is a text string, stored using the database encoding (UTF-8, UTF-16BE or UTF-16LE). - # BLOB. The value is a blob of data, stored exactly as it was input. - cmd = None - if cmd: - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("Altering tool_dependency.version column from TrimmedString(40) to Text failed.") - - -def downgrade(migrate_engine): - # Not necessary to change column type Text to TrimmedString(40). - pass diff --git a/lib/galaxy/model/migrate/versions/0101_drop_installed_changeset_revision_column.py b/lib/galaxy/model/migrate/versions/0101_drop_installed_changeset_revision_column.py deleted file mode 100644 index 353ae12bb59a..000000000000 --- a/lib/galaxy/model/migrate/versions/0101_drop_installed_changeset_revision_column.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Migration script to drop the installed_changeset_revision column from the tool_dependency table. -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("installed_changeset_revision", "tool_dependency", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - c = Column("installed_changeset_revision", TrimmedString(255)) - add_column(c, "tool_dependency", metadata) diff --git a/lib/galaxy/model/migrate/versions/0102_add_tool_dependency_status_columns.py b/lib/galaxy/model/migrate/versions/0102_add_tool_dependency_status_columns.py deleted file mode 100644 index 3dd65fd7a8cf..000000000000 --- a/lib/galaxy/model/migrate/versions/0102_add_tool_dependency_status_columns.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Migration script to add status and error_message columns to the tool_dependency table and drop the uninstalled column from the tool_dependency table. 
-""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, - Table, - TEXT, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - ToolDependency_table = Table("tool_dependency", metadata, autoload=True) - if migrate_engine.name == "sqlite": - col = Column("status", TrimmedString(255)) - else: - col = Column("status", TrimmedString(255), nullable=False) - add_column(col, ToolDependency_table, metadata) - - col = Column("error_message", TEXT) - add_column(col, ToolDependency_table, metadata) - - # SQLAlchemy Migrate has a bug when dropping a boolean column in SQLite - # TODO move to alembic. - if migrate_engine.name != "sqlite": - drop_column("uninstalled", ToolDependency_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - ToolDependency_table = Table("tool_dependency", metadata, autoload=True) - if migrate_engine.name != "sqlite": - col = Column("uninstalled", Boolean, default=False) - add_column(col, ToolDependency_table, metadata) - - drop_column("error_message", ToolDependency_table) - drop_column("status", ToolDependency_table) diff --git a/lib/galaxy/model/migrate/versions/0103_add_tool_shed_repository_status_columns.py b/lib/galaxy/model/migrate/versions/0103_add_tool_shed_repository_status_columns.py deleted file mode 100644 index e98c41bc9c78..000000000000 --- a/lib/galaxy/model/migrate/versions/0103_add_tool_shed_repository_status_columns.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Migration script to add status and error_message columns to the tool_shed_repository table.""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, - TEXT, -) - -# Need our custom types, but don't import anything else from model -from galaxy.model.custom_types import TrimmedString - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - ToolShedRepository_table = Table("tool_shed_repository", metadata, autoload=True) - # Add the status column to the tool_shed_repository table. - col = Column("status", TrimmedString(255)) - try: - col.create(ToolShedRepository_table) - assert col is ToolShedRepository_table.c.status - except Exception: - log.exception("Adding status column to the tool_shed_repository table failed.") - # Add the error_message column to the tool_shed_repository table. - col = Column("error_message", TEXT) - try: - col.create(ToolShedRepository_table) - assert col is ToolShedRepository_table.c.error_message - except Exception: - log.exception("Adding error_message column to the tool_shed_repository table failed.") - # Update the status column value for tool_shed_repositories to the default value 'Installed'. - cmd = "UPDATE tool_shed_repository SET status = 'Installed';" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("Exception executing SQL command: %s", cmd) - # Update the status column for tool_shed_repositories that have been uninstalled. 
- cmd = "UPDATE tool_shed_repository SET status = 'Uninstalled' WHERE uninstalled;" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("Exception executing SQL command: %s", cmd) - # Update the status column for tool_shed_repositories that have been deactivated. - cmd = "UPDATE tool_shed_repository SET status = 'Deactivated' where deleted and not uninstalled;" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("Exception executing SQL command: %s", cmd) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - ToolShedRepository_table = Table("tool_shed_repository", metadata, autoload=True) - try: - ToolShedRepository_table.c.status.drop() - except Exception: - log.exception("Dropping column status from the tool_shed_repository table failed.") - try: - ToolShedRepository_table.c.error_message.drop() - except Exception: - log.exception("Dropping column error_message from the tool_shed_repository table failed.") diff --git a/lib/galaxy/model/migrate/versions/0104_update_genome_downloader_job_parameters.py b/lib/galaxy/model/migrate/versions/0104_update_genome_downloader_job_parameters.py deleted file mode 100644 index 59d23193c4e9..000000000000 --- a/lib/galaxy/model/migrate/versions/0104_update_genome_downloader_job_parameters.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -Migration script to update the deferred job parameters for liftover transfer jobs. -""" - - -def upgrade(migrate_engine): - print(__doc__) - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0105_add_cleanup_event_table.py b/lib/galaxy/model/migrate/versions/0105_add_cleanup_event_table.py deleted file mode 100644 index 578969f5dd3b..000000000000 --- a/lib/galaxy/model/migrate/versions/0105_add_cleanup_event_table.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -Migration script to add the cleanup_event* tables. 
-""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import TrimmedString - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -# New table to log cleanup events -CleanupEvent_table = Table( - "cleanup_event", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("message", TrimmedString(1024)), -) - -CleanupEventDatasetAssociation_table = Table( - "cleanup_event_dataset_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True), - Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True), -) - -CleanupEventMetadataFileAssociation_table = Table( - "cleanup_event_metadata_file_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True), - Column("metadata_file_id", Integer, ForeignKey("metadata_file.id"), index=True), -) - -CleanupEventHistoryAssociation_table = Table( - "cleanup_event_history_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True), - Column("history_id", Integer, ForeignKey("history.id"), index=True), -) - -CleanupEventHistoryDatasetAssociationAssociation_table = Table( - "cleanup_event_hda_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True), - Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True), -) - -CleanupEventLibraryAssociation_table = Table( - "cleanup_event_library_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True), - Column("library_id", Integer, ForeignKey("library.id"), index=True), -) - -CleanupEventLibraryFolderAssociation_table = Table( - "cleanup_event_library_folder_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True), - Column("library_folder_id", Integer, ForeignKey("library_folder.id"), index=True), -) - -CleanupEventLibraryDatasetAssociation_table = Table( - "cleanup_event_library_dataset_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True), - Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), index=True), -) - -CleanupEventLibraryDatasetDatasetAssociationAssociation_table = Table( - "cleanup_event_ldda_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True), - Column("ldda_id", Integer, 
ForeignKey("library_dataset_dataset_association.id"), index=True), -) - -CleanupEventImplicitlyConvertedDatasetAssociationAssociation_table = Table( - "cleanup_event_icda_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True), - Column("icda_id", Integer, ForeignKey("implicitly_converted_dataset_association.id"), index=True), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - try: - CleanupEvent_table.create() - CleanupEventDatasetAssociation_table.create() - CleanupEventMetadataFileAssociation_table.create() - CleanupEventHistoryAssociation_table.create() - CleanupEventHistoryDatasetAssociationAssociation_table.create() - CleanupEventLibraryAssociation_table.create() - CleanupEventLibraryFolderAssociation_table.create() - CleanupEventLibraryDatasetAssociation_table.create() - CleanupEventLibraryDatasetDatasetAssociationAssociation_table.create() - CleanupEventImplicitlyConvertedDatasetAssociationAssociation_table.create() - except Exception: - log.exception("Creating table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - try: - CleanupEventImplicitlyConvertedDatasetAssociationAssociation_table.drop() - CleanupEventLibraryDatasetDatasetAssociationAssociation_table.drop() - CleanupEventLibraryDatasetAssociation_table.drop() - CleanupEventLibraryFolderAssociation_table.drop() - CleanupEventLibraryAssociation_table.drop() - CleanupEventHistoryDatasetAssociationAssociation_table.drop() - CleanupEventHistoryAssociation_table.drop() - CleanupEventMetadataFileAssociation_table.drop() - CleanupEventDatasetAssociation_table.drop() - CleanupEvent_table.drop() - except Exception: - log.exception("Dropping table failed.") diff --git a/lib/galaxy/model/migrate/versions/0106_add_missing_indexes.py b/lib/galaxy/model/migrate/versions/0106_add_missing_indexes.py deleted file mode 100644 index 817b44d531f4..000000000000 --- a/lib/galaxy/model/migrate/versions/0106_add_missing_indexes.py +++ /dev/null @@ -1,70 +0,0 @@ -""" -Migration script to create missing indexes. Adding new columns to existing tables via SQLAlchemy does not create the index, even if the column definition includes index=True. 
-""" - -import logging - -from sqlalchemy import MetaData - -from galaxy.model.migrate.versions.util import ( - add_index, - drop_index, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -indexes = ( - ("ix_metadata_file_lda_id", "metadata_file", "lda_id"), # 0003 - ("ix_history_importable", "history", "importable"), # 0007 - ("ix_sample_bar_code", "sample", "bar_code"), # 0009 - ("ix_request_type_deleted", "request_type", "deleted"), # 0012 - ("ix_galaxy_user_username", "galaxy_user", "username"), # 0014 - ("ix_form_definition_type", "form_definition", "type"), # 0019 - ("ix_form_definition_layout", "form_definition", "layout"), # 0019 - ("ix_job_library_folder_id", "job", "library_folder_id"), # 0020 - ("ix_page_published", "page", "published"), # 0023 - ("ix_page_deleted", "page", "deleted"), # 0023 - ("ix_galaxy_user_form_values_id", "galaxy_user", "form_values_id"), # 0025 - ("ix_lia_deleted", "library_info_association", "deleted"), # 0036 - ("ix_lfia_deleted", "library_folder_info_association", "deleted"), # 0036 - ("ix_lddia_deleted", "library_dataset_dataset_info_association", "deleted"), # 0036 - ("ix_sample_library_id", "sample", "library_id"), # 0037 - ("ix_sample_folder_id", "sample", "folder_id"), # 0037 - ("ix_lia_inheritable", "library_info_association", "inheritable"), # 0038 - ("ix_lfia_inheritable", "library_folder_info_association", "inheritable"), # 0038 - ("ix_job_imported", "job", "imported"), # 0051 - ("ix_request_notification", "request", "notification"), # 0057 - ("ix_sd_external_service_id", "sample_dataset", "external_service_id"), # 0068 - ("ix_icda_ldda_parent_id", "implicitly_converted_dataset_association", "ldda_parent_id"), # 0073 - ("ix_library_dataset_purged", "library_dataset", "purged"), # 0074 - ("ix_run_subindex", "run", "subindex"), # 0075 - ("ix_history_dataset_association_purged", "history_dataset_association", "purged"), # 0078 - ("ix_galaxy_user_disk_usage", "galaxy_user", "disk_usage"), # 0078 - ("ix_galaxy_session_disk_usage", "galaxy_session", "disk_usage"), # 0078 - ("ix_icda_ldda_id", "implicitly_converted_dataset_association", "ldda_id"), # 0084 - ("ix_tsr_includes_datatypes", "tool_shed_repository", "includes_datatypes"), # 0086 - ("ix_dataset_object_store_id", "dataset", "object_store_id"), # 0089 - ("ix_job_object_store_id", "job", "object_store_id"), # 0089 - ("ix_metadata_file_object_store_id", "metadata_file", "object_store_id"), # 0089 - ("ix_job_handler", "job", "handler"), # 0094 - ("ix_galaxy_user_email", "galaxy_user", "email"), # 0106 -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for ix, table, col in indexes: - add_index(ix, table, col, metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # TODO: Dropping a column used in a foreign key fails in MySQL, need to remove the FK first. - for ix, table, col in indexes: - drop_index(ix, table, col, metadata) diff --git a/lib/galaxy/model/migrate/versions/0107_add_exit_code_to_job_and_task.py b/lib/galaxy/model/migrate/versions/0107_add_exit_code_to_job_and_task.py deleted file mode 100644 index deb279fd518e..000000000000 --- a/lib/galaxy/model/migrate/versions/0107_add_exit_code_to_job_and_task.py +++ /dev/null @@ -1,63 +0,0 @@ -""" -Add the exit_code column to the Job and Task tables. 
-""" - -import logging - -from sqlalchemy import ( - Column, - Integer, - MetaData, - Table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -# There was a bug when only one column was used for both tables, -# so create separate columns. -exit_code_job_col = Column("exit_code", Integer, nullable=True) -exit_code_task_col = Column("exit_code", Integer, nullable=True) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Add the exit_code column to the Job table. - try: - job_table = Table("job", metadata, autoload=True) - exit_code_job_col.create(job_table) - assert exit_code_job_col is job_table.c.exit_code - except Exception: - log.exception("Adding column 'exit_code' to job table failed.") - - # Add the exit_code column to the Task table. - try: - task_table = Table("task", metadata, autoload=True) - exit_code_task_col.create(task_table) - assert exit_code_task_col is task_table.c.exit_code - except Exception: - log.exception("Adding column 'exit_code' to task table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop the Job table's exit_code column. - try: - job_table = Table("job", metadata, autoload=True) - exit_code_col = job_table.c.exit_code - exit_code_col.drop() - except Exception: - log.exception("Dropping 'exit_code' column from job table failed.") - - # Drop the Job table's exit_code column. - try: - task_table = Table("task", metadata, autoload=True) - exit_code_col = task_table.c.exit_code - exit_code_col.drop() - except Exception: - log.exception("Dropping 'exit_code' column from task table failed.") diff --git a/lib/galaxy/model/migrate/versions/0108_add_extended_metadata.py b/lib/galaxy/model/migrate/versions/0108_add_extended_metadata.py deleted file mode 100644 index 8307074061d7..000000000000 --- a/lib/galaxy/model/migrate/versions/0108_add_extended_metadata.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -Add the ExtendedMetadata and ExtendedMetadataIndex tables -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - String, - Table, - TEXT, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -ExtendedMetadata_table = Table( - "extended_metadata", metadata, Column("id", Integer, primary_key=True), Column("data", JSONType) -) - -ExtendedMetadataIndex_table = Table( - "extended_metadata_index", - metadata, - Column("id", Integer, primary_key=True), - Column( - "extended_metadata_id", - Integer, - ForeignKey("extended_metadata.id", onupdate="CASCADE", ondelete="CASCADE"), - index=True, - ), - Column("path", String(255)), - Column("value", TEXT), -) - -TABLES = [ExtendedMetadata_table, ExtendedMetadataIndex_table] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for table in TABLES: - create_table(table) - extended_metadata_ldda_col = Column( - "extended_metadata_id", Integer, ForeignKey("extended_metadata.id"), nullable=True - ) - add_column(extended_metadata_ldda_col, "library_dataset_dataset_association", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # TODO: Dropping a column used in a foreign key fails in MySQL, need to remove the FK first. 
- drop_column("extended_metadata_id", "library_dataset_dataset_association", metadata) - for table in reversed(TABLES): - drop_table(table) diff --git a/lib/galaxy/model/migrate/versions/0109_add_repository_dependency_tables.py b/lib/galaxy/model/migrate/versions/0109_add_repository_dependency_tables.py deleted file mode 100644 index 8bd63e6ba1d1..000000000000 --- a/lib/galaxy/model/migrate/versions/0109_add_repository_dependency_tables.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Migration script to add the repository_dependency and repository_repository_dependency_association tables. -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -RepositoryDependency_table = Table( - "repository_dependency", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("tool_shed_repository_id", Integer, ForeignKey("tool_shed_repository.id"), index=True, nullable=False), -) - -RepositoryRepositoryDependencyAssociation_table = Table( - "repository_repository_dependency_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("tool_shed_repository_id", Integer, ForeignKey("tool_shed_repository.id"), index=True), - Column("repository_dependency_id", Integer, ForeignKey("repository_dependency.id"), index=True), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(RepositoryDependency_table) - create_table(RepositoryRepositoryDependencyAssociation_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(RepositoryRepositoryDependencyAssociation_table) - drop_table(RepositoryDependency_table) diff --git a/lib/galaxy/model/migrate/versions/0110_add_dataset_uuid.py b/lib/galaxy/model/migrate/versions/0110_add_dataset_uuid.py deleted file mode 100644 index 8af310b03d28..000000000000 --- a/lib/galaxy/model/migrate/versions/0110_add_dataset_uuid.py +++ /dev/null @@ -1,45 +0,0 @@ -""" -Add UUID column to dataset table -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, -) - -from galaxy.model.custom_types import UUIDType - -log = logging.getLogger(__name__) -dataset_uuid_column = Column("uuid", UUIDType, nullable=True) - - -def upgrade(migrate_engine): - print(__doc__) - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - - # Add the uuid colum to the dataset table - try: - dataset_table = Table("dataset", metadata, autoload=True) - dataset_uuid_column.create(dataset_table) - assert dataset_uuid_column is dataset_table.c.uuid - except Exception: - log.exception("Adding column 'uuid' to dataset table failed.") - - -def downgrade(migrate_engine): - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - - # Drop the dataset table's uuid column. 
- try: - dataset_table = Table("dataset", metadata, autoload=True) - dataset_uuid = dataset_table.c.uuid - dataset_uuid.drop() - except Exception: - log.exception("Dropping 'uuid' column from dataset table failed.") diff --git a/lib/galaxy/model/migrate/versions/0111_add_job_destinations.py b/lib/galaxy/model/migrate/versions/0111_add_job_destinations.py deleted file mode 100644 index 0cc3bda90499..000000000000 --- a/lib/galaxy/model/migrate/versions/0111_add_job_destinations.py +++ /dev/null @@ -1,55 +0,0 @@ -""" -Add support for job destinations to the job table -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - String, - Table, -) - -from galaxy.model.custom_types import JSONType - -log = logging.getLogger(__name__) - - -def upgrade(migrate_engine): - print(__doc__) - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - Job_table = Table("job", metadata, autoload=True) - - c = Column("destination_id", String(255), nullable=True) - try: - c.create(Job_table) - assert c is Job_table.c.destination_id - except Exception: - log.exception("Adding column 'destination_id' to job table failed.") - - c = Column("destination_params", JSONType, nullable=True) - try: - c.create(Job_table) - assert c is Job_table.c.destination_params - except Exception: - log.exception("Adding column 'destination_params' to job table failed.") - - -def downgrade(migrate_engine): - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - Job_table = Table("job", metadata, autoload=True) - - try: - Job_table.c.destination_params.drop() - except Exception: - log.exception("Dropping column 'destination_params' from job table failed.") - - try: - Job_table.c.destination_id.drop() - except Exception: - log.exception("Dropping column 'destination_id' from job table failed.") diff --git a/lib/galaxy/model/migrate/versions/0112_add_data_manager_history_association_and_data_manager_job_association_tables.py b/lib/galaxy/model/migrate/versions/0112_add_data_manager_history_association_and_data_manager_job_association_tables.py deleted file mode 100644 index 6451924d7d98..000000000000 --- a/lib/galaxy/model/migrate/versions/0112_add_data_manager_history_association_and_data_manager_job_association_tables.py +++ /dev/null @@ -1,66 +0,0 @@ -""" -Migration script to add the data_manager_history_association table and data_manager_job_association. 
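[Editor's note — the script below indexes a TEXT column, which MySQL only supports as a prefix index; a minimal sketch with an assumed table name:]

from sqlalchemy import Column, Index, Integer, MetaData, Table, TEXT

metadata = MetaData()
Table(
    "data_manager_job_association_sketch",  # assumed name
    metadata,
    Column("id", Integer, primary_key=True),
    Column("data_manager_id", TEXT),
    # Renders on MySQL as KEY ... (data_manager_id(200)); other dialects
    # simply ignore the mysql_length keyword.
    Index("ix_dmja_sketch_data_manager_id", "data_manager_id", mysql_length=200),
)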
-""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Index, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -DataManagerHistoryAssociation_table = Table( - "data_manager_history_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, index=True, default=now, onupdate=now), - Column("history_id", Integer, ForeignKey("history.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), -) - -DataManagerJobAssociation_table = Table( - "data_manager_job_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, index=True, default=now, onupdate=now), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("data_manager_id", TEXT), - Index("ix_data_manager_job_association_data_manager_id", "data_manager_id", mysql_length=200), -) - -TABLES = [DataManagerHistoryAssociation_table, DataManagerJobAssociation_table] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for table in TABLES: - create_table(table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - for table in TABLES: - drop_table(table) diff --git a/lib/galaxy/model/migrate/versions/0113_update_migrate_tools_table.py b/lib/galaxy/model/migrate/versions/0113_update_migrate_tools_table.py deleted file mode 100644 index 64f8ca530f2f..000000000000 --- a/lib/galaxy/model/migrate/versions/0113_update_migrate_tools_table.py +++ /dev/null @@ -1,29 +0,0 @@ -""" -Migration script to update the migrate_tools.repository_path column to point to the new location lib/tool_shed/galaxy_install/migrate. -""" - -import logging - -log = logging.getLogger(__name__) - - -def upgrade(migrate_engine): - print(__doc__) - # Create the table. - try: - cmd = "UPDATE migrate_tools set repository_path='lib/galaxy/tool_shed/migrate';" - migrate_engine.execute(cmd) - except Exception: - log.exception( - "Updating migrate_tools.repository_path column to point to the new location lib/tool_shed/galaxy_install/migrate failed." - ) - - -def downgrade(migrate_engine): - try: - cmd = "UPDATE migrate_tools set repository_path='lib/galaxy/tool_shed/migrate';" - migrate_engine.execute(cmd) - except Exception: - log.exception( - "Updating migrate_tools.repository_path column to point to the old location lib/galaxy/tool_shed/migrate failed." - ) diff --git a/lib/galaxy/model/migrate/versions/0114_update_migrate_tools_table_again.py b/lib/galaxy/model/migrate/versions/0114_update_migrate_tools_table_again.py deleted file mode 100644 index a8f25bd807dc..000000000000 --- a/lib/galaxy/model/migrate/versions/0114_update_migrate_tools_table_again.py +++ /dev/null @@ -1,29 +0,0 @@ -""" -Migration script to update the migrate_tools.repository_path column to point to the new location lib/tool_shed/galaxy_install/migrate. -""" - -import logging - -log = logging.getLogger(__name__) - - -def upgrade(migrate_engine): - print(__doc__) - # Create the table. 
- try: - cmd = "UPDATE migrate_tools set repository_path='lib/tool_shed/galaxy_install/migrate';" - migrate_engine.execute(cmd) - except Exception: - log.exception( - "Updating migrate_tools.repository_path column to point to the new location lib/tool_shed/galaxy_install/migrate failed." - ) - - -def downgrade(migrate_engine): - try: - cmd = "UPDATE migrate_tools set repository_path='lib/galaxy/tool_shed/migrate';" - migrate_engine.execute(cmd) - except Exception: - log.exception( - "Updating migrate_tools.repository_path column to point to the old location lib/galaxy/tool_shed/migrate failed." - ) diff --git a/lib/galaxy/model/migrate/versions/0115_longer_user_password_field.py b/lib/galaxy/model/migrate/versions/0115_longer_user_password_field.py deleted file mode 100644 index e5317271b578..000000000000 --- a/lib/galaxy/model/migrate/versions/0115_longer_user_password_field.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -Expand the length of the password fields in the galaxy_user table to allow for other hashing schemes -""" -import logging - -from sqlalchemy import ( - MetaData, - String, - Table, -) - -log = logging.getLogger(__name__) - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - user = Table("galaxy_user", meta, autoload=True) - try: - user.c.password.alter(type=String(255)) - except Exception: - log.exception("Altering password column failed") - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0116_drop_update_available_col_add_tool_shed_status_col.py b/lib/galaxy/model/migrate/versions/0116_drop_update_available_col_add_tool_shed_status_col.py deleted file mode 100644 index fb6edb483841..000000000000 --- a/lib/galaxy/model/migrate/versions/0116_drop_update_available_col_add_tool_shed_status_col.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -Migration script to drop the update_available Boolean column and replace it with the tool_shed_status JSONType column in the tool_shed_repository table.
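[Editor's guess at the shape of the engine_false() helper the script below imports — not Galaxy's actual code: boolean literals differ per backend, so the UPDATE statement has to be parameterized by dialect:]

def engine_false_sketch(migrate_engine):
    # PostgreSQL accepts a real boolean literal...
    if migrate_engine.name in ("postgres", "postgresql"):
        return "FALSE"
    # ...while MySQL and SQLite store booleans as 0/1.
    elif migrate_engine.name in ("mysql", "sqlite"):
        return 0
    raise Exception(f"Unknown database type: {migrate_engine.name}")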
-""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, - Table, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, - engine_false, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - ToolShedRepository_table = Table("tool_shed_repository", metadata, autoload=True) - # SQLAlchemy Migrate has a bug when dropping a boolean column in SQLite - if migrate_engine.name != "sqlite": - drop_column("update_available", ToolShedRepository_table) - c = Column("tool_shed_status", JSONType, nullable=True) - add_column(c, ToolShedRepository_table, metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - ToolShedRepository_table = Table("tool_shed_repository", metadata, autoload=True) - drop_column("tool_shed_status", ToolShedRepository_table) - c = Column("update_available", Boolean, default=False) - add_column(c, ToolShedRepository_table, metadata) - try: - migrate_engine.execute(f"UPDATE tool_shed_repository SET update_available={engine_false(migrate_engine)}") - except Exception: - log.exception("Updating column 'update_available' of table 'tool_shed_repository' failed.") diff --git a/lib/galaxy/model/migrate/versions/0117_add_user_activation.py b/lib/galaxy/model/migrate/versions/0117_add_user_activation.py deleted file mode 100644 index dfb21022a20f..000000000000 --- a/lib/galaxy/model/migrate/versions/0117_add_user_activation.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -Adds 'active' and 'activation_token' columns to the galaxy_user table. -""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, - Table, -) - -from galaxy.model.custom_types import TrimmedString - -log = logging.getLogger(__name__) -user_active_column = Column("active", Boolean, default=True, nullable=True) -user_activation_token_column = Column("activation_token", TrimmedString(64), nullable=True) - - -def upgrade(migrate_engine): - print(__doc__) - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - - # Add the active and activation_token columns to the user table in one try because the depend on each other. - try: - user_table = Table("galaxy_user", metadata, autoload=True) - user_activation_token_column.create(table=user_table) - assert user_activation_token_column is user_table.c.activation_token - user_active_column.create(table=user_table, populate_default=True) - assert user_active_column is user_table.c.active - except Exception: - log.exception("Adding columns 'active' and 'activation_token' to galaxy_user table failed.") - - -def downgrade(migrate_engine): - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - - # Drop the user table's active and activation_token columns in one try because the depend on each other. 
- try: - user_table = Table("galaxy_user", metadata, autoload=True) - # SQLAlchemy Migrate has a bug when dropping a boolean column in SQLite - if migrate_engine.name != "sqlite": - user_active = user_table.c.active - user_active.drop() - user_activation_token = user_table.c.activation_token - user_activation_token.drop() - except Exception: - log.exception("Dropping 'active' and 'activation_token' columns from galaxy_user table failed.") diff --git a/lib/galaxy/model/migrate/versions/0118_add_hda_extended_metadata.py b/lib/galaxy/model/migrate/versions/0118_add_hda_extended_metadata.py deleted file mode 100644 index e5b216759a93..000000000000 --- a/lib/galaxy/model/migrate/versions/0118_add_hda_extended_metadata.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -Add link from history_dataset_association to the extended_metadata table -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() -extended_metadata_hda_col = Column("extended_metadata_id", Integer, ForeignKey("extended_metadata.id"), nullable=True) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - try: - hda_table = Table("history_dataset_association", metadata, autoload=True) - extended_metadata_hda_col.create(hda_table) - assert extended_metadata_hda_col is hda_table.c.extended_metadata_id - except Exception: - log.exception("Adding column 'extended_metadata_id' to history_dataset_association table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop the HDA table's extended metadata ID column. - try: - hda_table = Table("history_dataset_association", metadata, autoload=True) - extended_metadata_id = hda_table.c.extended_metadata_id - extended_metadata_id.drop() - except Exception: - log.exception("Dropping 'extended_metadata_id' column from history_dataset_association table failed.") diff --git a/lib/galaxy/model/migrate/versions/0119_job_metrics.py b/lib/galaxy/model/migrate/versions/0119_job_metrics.py deleted file mode 100644 index e2aee047ba95..000000000000 --- a/lib/galaxy/model/migrate/versions/0119_job_metrics.py +++ /dev/null @@ -1,129 +0,0 @@ -""" -Migration script for job metric plugins. 
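[Editor's illustration only, with assumed plugin and metric names: how a collected metric might land in the job_metric_numeric table defined below:]

from sqlalchemy import text

def record_metric_sketch(connection, job_id, seconds):
    # Numeric(22, 7) keeps seven fractional digits -- fine-grained enough
    # for runtimes while still holding large counters such as peak memory.
    connection.execute(
        text(
            "INSERT INTO job_metric_numeric (job_id, plugin, metric_name, metric_value) "
            "VALUES (:job_id, 'core', 'runtime_seconds', :value)"
        ),
        {"job_id": job_id, "value": seconds},
    )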
-""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Numeric, - Table, - Unicode, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -TEXT_METRIC_MAX_LENGTH = 1023 - -JobMetricText_table = Table( - "job_metric_text", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column( - "plugin", - Unicode(255), - ), - Column( - "metric_name", - Unicode(255), - ), - Column( - "metric_value", - Unicode(TEXT_METRIC_MAX_LENGTH), - ), -) - - -TaskMetricText_table = Table( - "task_metric_text", - metadata, - Column("id", Integer, primary_key=True), - Column("task_id", Integer, ForeignKey("task.id"), index=True), - Column( - "plugin", - Unicode(255), - ), - Column( - "metric_name", - Unicode(255), - ), - Column( - "metric_value", - Unicode(TEXT_METRIC_MAX_LENGTH), - ), -) - - -JobMetricNumeric_table = Table( - "job_metric_numeric", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column( - "plugin", - Unicode(255), - ), - Column( - "metric_name", - Unicode(255), - ), - Column( - "metric_value", - Numeric(22, 7), - ), -) - - -TaskMetricNumeric_table = Table( - "task_metric_numeric", - metadata, - Column("id", Integer, primary_key=True), - Column("task_id", Integer, ForeignKey("task.id"), index=True), - Column( - "plugin", - Unicode(255), - ), - Column( - "metric_name", - Unicode(255), - ), - Column( - "metric_value", - Numeric(22, 7), - ), -) - - -TABLES = [ - JobMetricText_table, - TaskMetricText_table, - JobMetricNumeric_table, - TaskMetricNumeric_table, -] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for table in TABLES: - create_table(table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - for table in TABLES: - drop_table(table) diff --git a/lib/galaxy/model/migrate/versions/0120_dataset_collections.py b/lib/galaxy/model/migrate/versions/0120_dataset_collections.py deleted file mode 100644 index fde673f78a4d..000000000000 --- a/lib/galaxy/model/migrate/versions/0120_dataset_collections.py +++ /dev/null @@ -1,228 +0,0 @@ -""" -Migration script for tables related to dataset collections. 
-""" - -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, - Unicode, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, - drop_table, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -DatasetCollection_table = Table( - "dataset_collection", - metadata, - Column("id", Integer, primary_key=True), - Column( - "collection_type", - Unicode(255), - nullable=False, - ), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), -) - -HistoryDatasetCollectionAssociation_table = Table( - "history_dataset_collection_association", - metadata, - Column("id", Integer, primary_key=True), - Column("collection_id", Integer, ForeignKey("dataset_collection.id"), index=True), - Column("history_id", Integer, ForeignKey("history.id"), index=True), - Column("hid", Integer), - Column("name", TrimmedString(255)), - Column("deleted", Boolean, default=False), - Column("visible", Boolean, default=True), - Column( - "copied_from_history_dataset_collection_association_id", - Integer, - ForeignKey("history_dataset_collection_association.id"), - nullable=True, - ), - Column("implicit_output_name", Unicode(255), nullable=True), -) - -LibraryDatasetCollectionAssociation_table = Table( - "library_dataset_collection_association", - metadata, - Column("id", Integer, primary_key=True), - Column("collection_id", Integer, ForeignKey("dataset_collection.id"), index=True), - Column("name", TrimmedString(255)), - Column("deleted", Boolean, default=False), - Column("folder_id", Integer, ForeignKey("library_folder.id"), index=True), -) - -DatasetCollectionElement_table = Table( - "dataset_collection_element", - metadata, - Column("id", Integer, primary_key=True), - Column("dataset_collection_id", Integer, ForeignKey("dataset_collection.id"), index=True, nullable=False), - Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True, nullable=True), - Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True), - Column("child_collection_id", Integer, ForeignKey("dataset_collection.id"), index=True, nullable=True), - Column("element_index", Integer, nullable=False), - Column("element_identifier", Unicode(255), nullable=False), -) - -HistoryDatasetCollectionAnnotationAssociation_table = Table( - "history_dataset_collection_annotation_association", - metadata, - Column("id", Integer, primary_key=True), - Column( - "history_dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True - ), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("annotation", TEXT), -) - -LibraryDatasetCollectionAnnotationAssociation_table = Table( - "library_dataset_collection_annotation_association", - metadata, - Column("id", Integer, primary_key=True), - Column( - "library_dataset_collection_id", Integer, ForeignKey("library_dataset_collection_association.id"), index=True - ), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("annotation", TEXT), -) - -HistoryDatasetCollectionRatingAssociation_table = Table( - "history_dataset_collection_rating_association", - metadata, - Column("id", Integer, primary_key=True), - Column( - "history_dataset_collection_id", Integer, 
ForeignKey("history_dataset_collection_association.id"), index=True - ), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("rating", Integer, index=True), -) - -LibraryDatasetCollectionRatingAssociation_table = Table( - "library_dataset_collection_rating_association", - metadata, - Column("id", Integer, primary_key=True), - Column( - "library_dataset_collection_id", Integer, ForeignKey("library_dataset_collection_association.id"), index=True - ), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("rating", Integer, index=True), -) - -HistoryDatasetCollectionTagAssociation_table = Table( - "history_dataset_collection_tag_association", - metadata, - Column("id", Integer, primary_key=True), - Column( - "history_dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True - ), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("user_tname", Unicode(255), index=True), - Column("value", Unicode(255), index=True), - Column("user_value", Unicode(255), index=True), -) - -LibraryDatasetCollectionTagAssociation_table = Table( - "library_dataset_collection_tag_association", - metadata, - Column("id", Integer, primary_key=True), - Column( - "library_dataset_collection_id", Integer, ForeignKey("library_dataset_collection_association.id"), index=True - ), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("user_tname", Unicode(255), index=True), - Column("value", Unicode(255), index=True), - Column("user_value", Unicode(255), index=True), -) - -JobToInputDatasetCollectionAssociation_table = Table( - "job_to_input_dataset_collection", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True), - Column("name", Unicode(255)), -) - -JobToOutputDatasetCollectionAssociation_table = Table( - "job_to_output_dataset_collection", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True), - Column("name", Unicode(255)), -) - -ImplicitlyCreatedDatasetCollectionInput_table = Table( - "implicitly_created_dataset_collection_inputs", - metadata, - Column("id", Integer, primary_key=True), - Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True), - Column("input_dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True), - Column("name", Unicode(255)), -) - - -TABLES = [ - DatasetCollection_table, - HistoryDatasetCollectionAssociation_table, - LibraryDatasetCollectionAssociation_table, - DatasetCollectionElement_table, - JobToInputDatasetCollectionAssociation_table, - JobToOutputDatasetCollectionAssociation_table, - ImplicitlyCreatedDatasetCollectionInput_table, - HistoryDatasetCollectionAnnotationAssociation_table, - HistoryDatasetCollectionRatingAssociation_table, - HistoryDatasetCollectionTagAssociation_table, - LibraryDatasetCollectionAnnotationAssociation_table, - LibraryDatasetCollectionRatingAssociation_table, - LibraryDatasetCollectionTagAssociation_table, -] - - -def upgrade(migrate_engine): - 
print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for table in TABLES: - create_table(table) - - # TODO: Find a better name for this column... - HiddenBeneathCollection_column = Column( - "hidden_beneath_collection_instance_id", - Integer, - ForeignKey("history_dataset_collection_association.id"), - nullable=True, - ) - add_column(HiddenBeneathCollection_column, "history_dataset_association", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("hidden_beneath_collection_instance_id", "history_dataset_association", metadata) - - for table in reversed(TABLES): - drop_table(table) diff --git a/lib/galaxy/model/migrate/versions/0121_workflow_uuids.py b/lib/galaxy/model/migrate/versions/0121_workflow_uuids.py deleted file mode 100644 index c49982af39db..000000000000 --- a/lib/galaxy/model/migrate/versions/0121_workflow_uuids.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -Add UUIDs to workflows -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, -) - -from galaxy.model.custom_types import UUIDType - -log = logging.getLogger(__name__) -metadata = MetaData() - - -""" -Because both workflow and job requests can be determined -based on a fixed data structure, their IDs are based on -hashing the data structure -""" -workflow_uuid_column = Column("uuid", UUIDType, nullable=True) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Add the uuid column to the workflow table - try: - workflow_table = Table("workflow", metadata, autoload=True) - workflow_uuid_column.create(workflow_table) - assert workflow_uuid_column is workflow_table.c.uuid - except Exception: - log.exception("Adding column 'uuid' to workflow table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # Drop the workflow table's uuid column. - try: - workflow_table = Table("workflow", metadata, autoload=True) - workflow_uuid = workflow_table.c.uuid - workflow_uuid.drop() - except Exception: - log.exception("Dropping 'uuid' column from workflow table failed.") diff --git a/lib/galaxy/model/migrate/versions/0122_grow_mysql_blobs.py b/lib/galaxy/model/migrate/versions/0122_grow_mysql_blobs.py deleted file mode 100644 index 2092764af8cd..000000000000 --- a/lib/galaxy/model/migrate/versions/0122_grow_mysql_blobs.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -Migration script to grow MySQL blobs.
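[Editor's note for context on the ALTER statements below — MySQL binary types come in fixed size tiers, so "growing" a column means moving it to the next type:]

# Maximum sizes of MySQL binary types, per the MySQL documentation.
MYSQL_BLOB_TIERS = {
    "TINYBLOB": 2**8 - 1,  # 255 bytes
    "BLOB": 2**16 - 1,  # 64 KB; what these columns were before this script
    "MEDIUMBLOB": 2**24 - 1,  # 16 MB; the target of the migration
    "LONGBLOB": 2**32 - 1,  # 4 GB
}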
-""" - -import logging - -from sqlalchemy import MetaData - -log = logging.getLogger(__name__) -metadata = MetaData() - -BLOB_COLUMNS = [ - ("deferred_job", "params"), - ("extended_metadata", "data"), - ("form_definition", "fields"), - ("form_definition", "layout"), - ("form_values", "content"), - ("history_dataset_association", "metadata"), - ("job", "destination_params"), - ("library_dataset_dataset_association", "metadata"), - ("post_job_action", "action_arguments"), - ("request", "notification"), - ("sample", "workflow"), - ("transfer_job", "params"), - ("workflow_step", "tool_inputs"), - ("workflow_step", "tool_errors"), - ("workflow_step", "position"), - ("workflow_step", "config"), - ("tool_shed_repository", "metadata"), - ("tool_shed_repository", "tool_shed_status"), -] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - if migrate_engine.name != "mysql": - return - - for (table, column) in BLOB_COLUMNS: - cmd = f"ALTER TABLE {table} MODIFY COLUMN {column} MEDIUMBLOB;" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("Failed to grow column %s.%s", table, column) - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0123_add_workflow_request_tables.py b/lib/galaxy/model/migrate/versions/0123_add_workflow_request_tables.py deleted file mode 100644 index 62cc4b810fee..000000000000 --- a/lib/galaxy/model/migrate/versions/0123_add_workflow_request_tables.py +++ /dev/null @@ -1,133 +0,0 @@ -""" -Migration script for workflow request tables. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - String, - Table, - TEXT, - Unicode, -) - -from galaxy.model.custom_types import ( - JSONType, - TrimmedString, - UUIDType, -) -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -WorkflowRequestInputParameter_table = Table( - "workflow_request_input_parameters", - metadata, - Column("id", Integer, primary_key=True), - Column( - "workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id", onupdate="CASCADE", ondelete="CASCADE") - ), - Column("name", Unicode(255)), - Column("type", Unicode(255)), - Column("value", TEXT), -) - - -WorkflowRequestStepState_table = Table( - "workflow_request_step_states", - metadata, - Column("id", Integer, primary_key=True), - Column( - "workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id", onupdate="CASCADE", ondelete="CASCADE") - ), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")), - Column("value", JSONType), -) - - -WorkflowRequestToInputDatasetAssociation_table = Table( - "workflow_request_to_input_dataset", - metadata, - Column("id", Integer, primary_key=True), - Column("name", String(255)), - Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")), - Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True), -) - - -WorkflowRequestToInputDatasetCollectionAssociation_table = Table( - "workflow_request_to_input_collection_dataset", - metadata, - Column("id", Integer, primary_key=True), - Column("name", String(255)), - Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")), - 
Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True), -) - - -TABLES = [ - WorkflowRequestInputParameter_table, - WorkflowRequestStepState_table, - WorkflowRequestToInputDatasetAssociation_table, - WorkflowRequestToInputDatasetCollectionAssociation_table, -] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for table in TABLES: - create_table(table) - - History_column = Column("history_id", Integer, ForeignKey("history.id"), nullable=True) - State_column = Column("state", TrimmedString(64)) - - # TODO: Handle indexes correctly - SchedulerId_column = Column("scheduler", TrimmedString(255)) - HandlerId_column = Column("handler", TrimmedString(255)) - WorkflowUUID_column = Column("uuid", UUIDType, nullable=True) - add_column(History_column, "workflow_invocation", metadata) - add_column(State_column, "workflow_invocation", metadata) - add_column(SchedulerId_column, "workflow_invocation", metadata, index_name="id_workflow_invocation_scheduler") - add_column(HandlerId_column, "workflow_invocation", metadata, index_name="id_workflow_invocation_handler") - add_column(WorkflowUUID_column, "workflow_invocation", metadata) - - # All previous invocations have been scheduled... - cmd = "UPDATE workflow_invocation SET state = 'scheduled'" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("failed to update past workflow invocation states.") - - WorkflowInvocationStepAction_column = Column("action", JSONType, nullable=True) - add_column(WorkflowInvocationStepAction_column, "workflow_invocation_step", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - for table in TABLES: - drop_table(table) - - drop_column("state", "workflow_invocation", metadata) - drop_column("scheduler", "workflow_invocation", metadata) - drop_column("uuid", "workflow_invocation", metadata) - drop_column("history_id", "workflow_invocation", metadata) - drop_column("handler", "workflow_invocation", metadata) - drop_column("action", "workflow_invocation_step", metadata) diff --git a/lib/galaxy/model/migrate/versions/0124_job_state_history.py b/lib/galaxy/model/migrate/versions/0124_job_state_history.py deleted file mode 100644 index ea9dcfc679c9..000000000000 --- a/lib/galaxy/model/migrate/versions/0124_job_state_history.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -Migration script for the job state history table -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - String, - Table, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -JobStateHistory_table = Table( - "job_state_history", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("state", String(64), index=True), - Column("info", TrimmedString(255)), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(JobStateHistory_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(JobStateHistory_table) diff --git 
a/lib/galaxy/model/migrate/versions/0125_workflow_step_tracking.py b/lib/galaxy/model/migrate/versions/0125_workflow_step_tracking.py deleted file mode 100644 index 155e0d605064..000000000000 --- a/lib/galaxy/model/migrate/versions/0125_workflow_step_tracking.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Migration script to enhance workflow step usability by adding labels and UUIDs. -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, -) - -from galaxy.model.custom_types import ( - TrimmedString, - UUIDType, -) -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - StepLabel_column = Column("label", TrimmedString(255)) - StepUUID_column = Column("uuid", UUIDType, nullable=True) - add_column(StepLabel_column, "workflow_step", metadata) - add_column(StepUUID_column, "workflow_step", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("label", "workflow_step", metadata) - drop_column("uuid", "workflow_step", metadata) diff --git a/lib/galaxy/model/migrate/versions/0126_password_reset.py b/lib/galaxy/model/migrate/versions/0126_password_reset.py deleted file mode 100644 index 27124de1006a..000000000000 --- a/lib/galaxy/model/migrate/versions/0126_password_reset.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -Migration script for the password reset table -""" - -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - String, - Table, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -PasswordResetToken_table = Table( - "password_reset_token", - metadata, - Column("token", String(32), primary_key=True, unique=True, index=True), - Column("expiration_time", DateTime), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(PasswordResetToken_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(PasswordResetToken_table) diff --git a/lib/galaxy/model/migrate/versions/0127_output_collection_adjustments.py b/lib/galaxy/model/migrate/versions/0127_output_collection_adjustments.py deleted file mode 100644 index 6346bff0d267..000000000000 --- a/lib/galaxy/model/migrate/versions/0127_output_collection_adjustments.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Migration script updating collections tables for output collections. 
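[Editor's sketch of the pattern the script below relies on (see its "need server_default" comment): adding a NOT NULL column to a table that already holds rows only works if the database can backfill a value at DDL time:]

from sqlalchemy import Column, String

populated_state_sketch = Column(
    "populated_state",
    String(64),
    default="ok",  # applied client-side by SQLAlchemy for new inserts
    server_default="ok",  # baked into the DDL so pre-existing rows get a value
    nullable=False,
)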
-""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, - Unicode, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -JobToImplicitOutputDatasetCollectionAssociation_table = Table( - "job_to_implicit_output_dataset_collection", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("dataset_collection_id", Integer, ForeignKey("dataset_collection.id"), index=True), - Column("name", Unicode(255)), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(JobToImplicitOutputDatasetCollectionAssociation_table) - - dataset_collection_table = Table("dataset_collection", metadata, autoload=True) - # need server_default because column in non-null - populated_state_column = Column( - "populated_state", TrimmedString(64), default="ok", server_default="ok", nullable=False - ) - add_column(populated_state_column, dataset_collection_table, metadata) - - populated_message_column = Column("populated_state_message", TEXT, nullable=True) - add_column(populated_message_column, dataset_collection_table, metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(JobToImplicitOutputDatasetCollectionAssociation_table) - - dataset_collection_table = Table("dataset_collection", metadata, autoload=True) - drop_column("populated_state", dataset_collection_table) - drop_column("populated_state_message", dataset_collection_table) diff --git a/lib/galaxy/model/migrate/versions/0128_session_timeout.py b/lib/galaxy/model/migrate/versions/0128_session_timeout.py deleted file mode 100644 index 062242585fcb..000000000000 --- a/lib/galaxy/model/migrate/versions/0128_session_timeout.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Migration script to add session update time (used for timeouts) -""" - -import logging - -from sqlalchemy import ( - Column, - DateTime, - MetaData, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - lastaction_column = Column("last_action", DateTime) - add_column(lastaction_column, "galaxy_session", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("last_action", "galaxy_session", metadata) diff --git a/lib/galaxy/model/migrate/versions/0129_job_external_output_metadata_validity.py b/lib/galaxy/model/migrate/versions/0129_job_external_output_metadata_validity.py deleted file mode 100644 index 2f6ad569bc2f..000000000000 --- a/lib/galaxy/model/migrate/versions/0129_job_external_output_metadata_validity.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Migration script to allow invalidation of job external output metadata temp files -""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - isvalid_column = Column("is_valid", Boolean, default=True) - 
add_column(isvalid_column, "job_external_output_metadata", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - # SQLAlchemy Migrate has a bug when dropping a boolean column in SQLite - if migrate_engine.name != "sqlite": - drop_column("is_valid", "job_external_output_metadata", metadata) diff --git a/lib/galaxy/model/migrate/versions/0130_change_pref_datatype.py b/lib/galaxy/model/migrate/versions/0130_change_pref_datatype.py deleted file mode 100644 index e5ed362792d0..000000000000 --- a/lib/galaxy/model/migrate/versions/0130_change_pref_datatype.py +++ /dev/null @@ -1,28 +0,0 @@ -""" -Migration script to change the 'value' column of 'user_preference' table from varchar to text. -""" - -import logging - -from sqlalchemy import ( - MetaData, - Table, - Text, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - t = Table("user_preference", metadata, autoload=True) - t.c.value.alter(type=Text) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - # Pass, since we don't want to potentially truncate data. diff --git a/lib/galaxy/model/migrate/versions/0131_subworkflow_and_input_parameter_modules.py b/lib/galaxy/model/migrate/versions/0131_subworkflow_and_input_parameter_modules.py deleted file mode 100644 index a75d820a66a3..000000000000 --- a/lib/galaxy/model/migrate/versions/0131_subworkflow_and_input_parameter_modules.py +++ /dev/null @@ -1,115 +0,0 @@ -""" -Migration script to support subworkflows and workflow request input parameters -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - ForeignKeyConstraint, - Index, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import ( - JSONType, - TrimmedString, - UUIDType, -) -from galaxy.model.migrate.versions.util import ( - add_column, - alter_column, - create_table, - drop_column, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -WorkflowInvocationToSubworkflowInvocationAssociation_table = Table( - "workflow_invocation_to_subworkflow_invocation_association", - metadata, - Column("id", Integer, primary_key=True), - Column("workflow_invocation_id", Integer), - Column("subworkflow_invocation_id", Integer), - Column("workflow_step_id", Integer), - ForeignKeyConstraint(["workflow_invocation_id"], ["workflow_invocation.id"], name="fk_wfi_swi_wfi"), - ForeignKeyConstraint(["subworkflow_invocation_id"], ["workflow_invocation.id"], name="fk_wfi_swi_swi"), - ForeignKeyConstraint(["workflow_step_id"], ["workflow_step.id"], name="fk_wfi_swi_ws"), -) - -WorkflowRequestInputStepParameter_table = Table( - "workflow_request_input_step_parameter", - metadata, - Column("id", Integer, primary_key=True), - Column("workflow_invocation_id", Integer), - Column("workflow_step_id", Integer), - Column("parameter_value", JSONType), - ForeignKeyConstraint(["workflow_invocation_id"], ["workflow_invocation.id"], name="fk_wfreq_isp_wfi"), - ForeignKeyConstraint(["workflow_step_id"], ["workflow_step.id"], name="fk_wfreq_isp_ws"), -) - -TABLES = [ - WorkflowInvocationToSubworkflowInvocationAssociation_table, - WorkflowRequestInputStepParameter_table, -] - -INDEXES = [ - Index("ix_wfinv_swfinv_wfi", WorkflowInvocationToSubworkflowInvocationAssociation_table.c.workflow_invocation_id), - Index( - "ix_wfinv_swfinv_swfi", WorkflowInvocationToSubworkflowInvocationAssociation_table.c.subworkflow_invocation_id 
- ), - Index("ix_wfreq_inputstep_wfi", WorkflowRequestInputStepParameter_table.c.workflow_invocation_id), -] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - if migrate_engine.name in ["postgres", "postgresql"]: - subworkflow_id_column = Column("subworkflow_id", Integer, ForeignKey("workflow.id"), nullable=True) - input_subworkflow_step_id_column = Column( - "input_subworkflow_step_id", Integer, ForeignKey("workflow_step.id"), nullable=True - ) - parent_workflow_id_column = Column("parent_workflow_id", Integer, ForeignKey("workflow.id"), nullable=True) - else: - subworkflow_id_column = Column("subworkflow_id", Integer, nullable=True) - input_subworkflow_step_id_column = Column("input_subworkflow_step_id", Integer, nullable=True) - parent_workflow_id_column = Column("parent_workflow_id", Integer, nullable=True) - add_column(subworkflow_id_column, "workflow_step", metadata) - add_column(input_subworkflow_step_id_column, "workflow_step_connection", metadata) - add_column(parent_workflow_id_column, "workflow", metadata) - workflow_output_label_column = Column("label", TrimmedString(255)) - workflow_output_uuid_column = Column("uuid", UUIDType, nullable=True) - add_column(workflow_output_label_column, "workflow_output", metadata) - add_column(workflow_output_uuid_column, "workflow_output", metadata) - - # Make stored_workflow_id nullable, since now workflows can belong to either - # a stored workflow or a parent workflow. - alter_column("stored_workflow_id", "workflow", metadata, nullable=True) - - for table in TABLES: - # Indexes are automatically created when the tables are. - create_table(table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("subworkflow_id", "workflow_step", metadata) - drop_column("parent_workflow_id", "workflow", metadata) - - drop_column("input_subworkflow_step_id", "workflow_step_connection", metadata) - - drop_column("label", "workflow_output", metadata) - drop_column("uuid", "workflow_output", metadata) - - for table in TABLES: - drop_table(table) diff --git a/lib/galaxy/model/migrate/versions/0132_add_lastpasswordchange_to_user.py b/lib/galaxy/model/migrate/versions/0132_add_lastpasswordchange_to_user.py deleted file mode 100644 index d1fe77793a30..000000000000 --- a/lib/galaxy/model/migrate/versions/0132_add_lastpasswordchange_to_user.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -Migration script to add a last_password_change field to the user table -""" - -from sqlalchemy import ( - Column, - DateTime, - MetaData, - Table, -) - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - account = Table("galaxy_user", meta, autoload=True) - lpc = Column("last_password_change", DateTime()) - lpc.create(account) - - -def downgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - account = Table("galaxy_user", meta, autoload=True) - account.c.last_password_change.drop() diff --git a/lib/galaxy/model/migrate/versions/0133_add_dependency_column_to_job.py b/lib/galaxy/model/migrate/versions/0133_add_dependency_column_to_job.py deleted file mode 100644 index 9e264bfa2d52..000000000000 --- a/lib/galaxy/model/migrate/versions/0133_add_dependency_column_to_job.py +++ /dev/null @@ -1,45 +0,0 @@ -""" -Add dependencies column to jobs table -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, -) - -from galaxy.model.custom_types import JSONType - -log = logging.getLogger(__name__) -jobs_dependencies_column = 
Column("dependencies", JSONType, nullable=True) - - -def upgrade(migrate_engine): - print(__doc__) - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - - # Add the dependencies column to the job table - try: - jobs_table = Table("job", metadata, autoload=True) - jobs_dependencies_column.create(jobs_table) - assert jobs_dependencies_column is jobs_table.c.dependencies - except Exception: - log.exception("Adding column 'dependencies' to job table failed.") - - -def downgrade(migrate_engine): - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - - # Drop the job table's dependencies column. - try: - jobs_table = Table("job", metadata, autoload=True) - jobs_dependencies = jobs_table.c.dependencies - jobs_dependencies.drop() - except Exception: - log.exception("Dropping 'dependencies' column from job table failed.") diff --git a/lib/galaxy/model/migrate/versions/0134_hda_set_deleted_if_purged.py b/lib/galaxy/model/migrate/versions/0134_hda_set_deleted_if_purged.py deleted file mode 100644 index 2457748b4927..000000000000 --- a/lib/galaxy/model/migrate/versions/0134_hda_set_deleted_if_purged.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -Postponed to migration 160. -""" - - -def upgrade(migrate_engine): - print(__doc__) - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0135_add_library_tags.py b/lib/galaxy/model/migrate/versions/0135_add_library_tags.py deleted file mode 100644 index 9c5a68a883a8..000000000000 --- a/lib/galaxy/model/migrate/versions/0135_add_library_tags.py +++ /dev/null @@ -1,58 +0,0 @@ -""" -This migration script adds support for storing tags in the context of a dataset in a library -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -# Need our custom types, but don't import anything else from model -from galaxy.model.custom_types import TrimmedString - -log = logging.getLogger(__name__) -metadata = MetaData() - - -LibraryDatasetDatasetAssociationTagAssociation_table = Table( - "library_dataset_dataset_association_tag_association", - metadata, - Column("id", Integer, primary_key=True), - Column( - "library_dataset_dataset_association_id", - Integer, - ForeignKey("library_dataset_dataset_association.id"), - index=True, - ), - Column("tag_id", Integer, ForeignKey("tag.id"), index=True), - Column("user_tname", TrimmedString(255), index=True), - Column("value", TrimmedString(255), index=True), - Column("user_value", TrimmedString(255), index=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), -) - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - try: - LibraryDatasetDatasetAssociationTagAssociation_table.create() - except Exception: - log.exception("Creating library_dataset_association_tag_association table failed.") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - try: - LibraryDatasetDatasetAssociationTagAssociation_table.drop() - except Exception: - log.exception("Dropping library_dataset_association_tag_association table failed.") diff --git a/lib/galaxy/model/migrate/versions/0136_collection_and_workflow_state.py b/lib/galaxy/model/migrate/versions/0136_collection_and_workflow_state.py deleted file mode 100644 index be58fa9aa740..000000000000 --- a/lib/galaxy/model/migrate/versions/0136_collection_and_workflow_state.py +++ /dev/null @@ -1,163 +0,0 @@ -""" -Migration script for collections and workflows 
connections. -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - String, - Table, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -workflow_invocation_output_dataset_association_table = Table( - "workflow_invocation_output_dataset_association", - metadata, - Column("id", Integer, primary_key=True), - Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")), - Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True), - Column("workflow_output_id", Integer, ForeignKey("workflow_output.id")), -) - -workflow_invocation_output_dataset_collection_association_table = Table( - "workflow_invocation_output_dataset_collection_association", - metadata, - Column("id", Integer, primary_key=True), - Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id", name="fk_wiodca_wii"), index=True), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id", name="fk_wiodca_wsi")), - Column( - "dataset_collection_id", - Integer, - ForeignKey("history_dataset_collection_association.id", name="fk_wiodca_dci"), - index=True, - ), - Column("workflow_output_id", Integer, ForeignKey("workflow_output.id", name="fk_wiodca_woi")), -) - -workflow_invocation_step_output_dataset_association_table = Table( - "workflow_invocation_step_output_dataset_association", - metadata, - Column("id", Integer, primary_key=True), - Column("workflow_invocation_step_id", Integer, ForeignKey("workflow_invocation_step.id"), index=True), - Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True), - Column("output_name", String(255), nullable=True), -) - -workflow_invocation_step_output_dataset_collection_association_table = Table( - "workflow_invocation_step_output_dataset_collection_association", - metadata, - Column("id", Integer, primary_key=True), - Column( - "workflow_invocation_step_id", - Integer, - ForeignKey("workflow_invocation_step.id", name="fk_wisodca_wisi"), - index=True, - ), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id", name="fk_wisodca_wsi")), - Column( - "dataset_collection_id", - Integer, - ForeignKey("history_dataset_collection_association.id", name="fk_wisodca_dci"), - index=True, - ), - Column("output_name", String(255), nullable=True), -) - -implicit_collection_jobs_table = Table( - "implicit_collection_jobs", - metadata, - Column("id", Integer, primary_key=True), - Column("populated_state", TrimmedString(64), default="new", nullable=False), -) - -implicit_collection_jobs_job_association_table = Table( - "implicit_collection_jobs_job_association", - metadata, - Column("id", Integer, primary_key=True), - Column("implicit_collection_jobs_id", Integer, ForeignKey("implicit_collection_jobs.id"), index=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), # Consider making this nullable... - Column("order_index", Integer, nullable=False), -) - - -def get_new_tables(): - # Normally we define this globally in the file, but we need to delay the - # reading of existing tables because an existing workflow_invocation_step - # table exists that we want to recreate. 
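[Editor's sketch of one plausible reading of the pitfall the comment above sidesteps: metadata.reflect() loads existing tables into the MetaData, after which a module-level Table of the same name would collide:]

from sqlalchemy import Column, Integer, MetaData, Table

metadata = MetaData()
Table("workflow_invocation_step", metadata, Column("id", Integer, primary_key=True))
# A second definition with the same name on the same MetaData raises
# InvalidRequestError unless extend_existing=True is passed; reflect()
# populates MetaData the same way, hence the deferred get_new_tables().
# Table("workflow_invocation_step", metadata, Column("id", Integer))  # would raise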
- return [ - workflow_invocation_output_dataset_association_table, - workflow_invocation_output_dataset_collection_association_table, - workflow_invocation_step_output_dataset_association_table, - workflow_invocation_step_output_dataset_collection_association_table, - implicit_collection_jobs_table, - implicit_collection_jobs_job_association_table, - ] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for table in get_new_tables(): - create_table(table) - - # Set default for creation to scheduled, actual mapping has new as default. - workflow_invocation_step_state_column = Column("state", TrimmedString(64), default="scheduled") - if migrate_engine.name in ["postgres", "postgresql"]: - implicit_collection_jobs_id_column = Column( - "implicit_collection_jobs_id", Integer, ForeignKey("implicit_collection_jobs.id"), nullable=True - ) - job_id_column = Column("job_id", Integer, ForeignKey("job.id"), nullable=True) - else: - implicit_collection_jobs_id_column = Column("implicit_collection_jobs_id", Integer, nullable=True) - job_id_column = Column("job_id", Integer, nullable=True) - dataset_collection_element_count_column = Column("element_count", Integer, nullable=True) - - add_column(implicit_collection_jobs_id_column, "history_dataset_collection_association", metadata) - add_column(job_id_column, "history_dataset_collection_association", metadata) - add_column(dataset_collection_element_count_column, "dataset_collection", metadata) - - implicit_collection_jobs_id_column = Column( - "implicit_collection_jobs_id", Integer, ForeignKey("implicit_collection_jobs.id"), nullable=True - ) - add_column(implicit_collection_jobs_id_column, "workflow_invocation_step", metadata) - add_column(workflow_invocation_step_state_column, "workflow_invocation_step", metadata) - - cmd = ( - "UPDATE dataset_collection SET element_count = " - + "(SELECT (CASE WHEN count(*) > 0 THEN count(*) ELSE 0 END) FROM dataset_collection_element WHERE " - + "dataset_collection_element.dataset_collection_id = dataset_collection.id)" - ) - migrate_engine.execute(cmd) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("implicit_collection_jobs_id", "history_dataset_collection_association", metadata) - drop_column("job_id", "history_dataset_collection_association", metadata) - drop_column("implicit_collection_jobs_id", "workflow_invocation_step", metadata) - drop_column("state", "workflow_invocation_step", metadata) - drop_column("element_count", "dataset_collection", metadata) - - for table in reversed(get_new_tables()): - drop_table(table) diff --git a/lib/galaxy/model/migrate/versions/0137_add_copied_from_job_id_column.py b/lib/galaxy/model/migrate/versions/0137_add_copied_from_job_id_column.py deleted file mode 100644 index 5ed23b7dc6fc..000000000000 --- a/lib/galaxy/model/migrate/versions/0137_add_copied_from_job_id_column.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -Add copied_from_job_id column to jobs table -""" - -import logging - -from sqlalchemy import ( - Column, - Integer, - MetaData, - Table, -) - -log = logging.getLogger(__name__) -copied_from_job_id_column = Column("copied_from_job_id", Integer, nullable=True) - - -def upgrade(migrate_engine): - print(__doc__) - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - - # Add the copied_from_job_id column to the job table - try: - jobs_table = Table("job", metadata, autoload=True) - copied_from_job_id_column.create(jobs_table) - assert 
copied_from_job_id_column is jobs_table.c.copied_from_job_id
-    except Exception:
-        log.exception("Adding column 'copied_from_job_id' to job table failed.")
-
-
-def downgrade(migrate_engine):
-    metadata = MetaData()
-    metadata.bind = migrate_engine
-    metadata.reflect()
-
-    # Drop the job table's copied_from_job_id column.
-    try:
-        jobs_table = Table("job", metadata, autoload=True)
-        copied_from_job_id = jobs_table.c.copied_from_job_id
-        copied_from_job_id.drop()
-    except Exception:
-        log.exception("Dropping 'copied_from_job_id' column from job table failed.")
diff --git a/lib/galaxy/model/migrate/versions/0138_add_hda_version.py b/lib/galaxy/model/migrate/versions/0138_add_hda_version.py
deleted file mode 100644
index f7aca9438856..000000000000
--- a/lib/galaxy/model/migrate/versions/0138_add_hda_version.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""
-Add version column to history_dataset_association table
-"""
-
-import logging
-
-from sqlalchemy import (
-    Column,
-    Integer,
-    MetaData,
-    Table,
-)
-
-log = logging.getLogger(__name__)
-version_column = Column("version", Integer, default=1)
-
-
-def upgrade(migrate_engine):
-    print(__doc__)
-    metadata = MetaData()
-    metadata.bind = migrate_engine
-    metadata.reflect()
-
-    # Add the version column to the history_dataset_association table
-    try:
-        hda_table = Table("history_dataset_association", metadata, autoload=True)
-        version_column.create(hda_table)
-        assert version_column is hda_table.c.version
-    except Exception:
-        log.exception("Adding column 'version' to history_dataset_association table failed.")
-
-
-def downgrade(migrate_engine):
-    metadata = MetaData()
-    metadata.bind = migrate_engine
-    metadata.reflect()
-
-    # Drop the history_dataset_association table's version column.
-    try:
-        hda_table = Table("history_dataset_association", metadata, autoload=True)
-        version_column = hda_table.c.version
-        version_column.drop()
-    except Exception:
-        log.exception("Dropping 'version' column from history_dataset_association table failed.")
diff --git a/lib/galaxy/model/migrate/versions/0139_add_history_dataset_association_history_table.py b/lib/galaxy/model/migrate/versions/0139_add_history_dataset_association_history_table.py
deleted file mode 100644
index bd55c013a510..000000000000
--- a/lib/galaxy/model/migrate/versions/0139_add_history_dataset_association_history_table.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""
-Migration script to add the history_dataset_association_history table.
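The two add-column scripts above (0137 and 0138) repeat the same reflect-create-assert boilerplate. For comparison, the equivalent Alembic revision reduces to a pair of calls; a sketch, with the table and column names taken from 0138 and the revision ids omitted:

```python
import sqlalchemy as sa
from alembic import op


def upgrade():
    op.add_column("history_dataset_association", sa.Column("version", sa.Integer, default=1))


def downgrade():
    op.drop_column("history_dataset_association", "version")
```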
-""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import ( - MetadataType, - TrimmedString, -) -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -HistoryDatasetAssociationHistory_table = Table( - "history_dataset_association_history", - metadata, - Column("id", Integer, primary_key=True), - Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True), - Column("update_time", DateTime, default=now), - Column("version", Integer, index=True), - Column("name", TrimmedString(255)), - Column("extension", TrimmedString(64)), - Column("metadata", MetadataType, key="_metadata"), - Column("extended_metadata_id", Integer, ForeignKey("extended_metadata.id"), index=True), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(HistoryDatasetAssociationHistory_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(HistoryDatasetAssociationHistory_table) diff --git a/lib/galaxy/model/migrate/versions/0140_add_dataset_version_to_job_to_input_dataset_association_table.py b/lib/galaxy/model/migrate/versions/0140_add_dataset_version_to_job_to_input_dataset_association_table.py deleted file mode 100644 index d02febf1784e..000000000000 --- a/lib/galaxy/model/migrate/versions/0140_add_dataset_version_to_job_to_input_dataset_association_table.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -Add dataset_version column to job_to_input_dataset table -""" - -import logging - -from sqlalchemy import ( - Column, - Integer, - MetaData, - Table, -) - -log = logging.getLogger(__name__) -dataset_version_column = Column("dataset_version", Integer) - - -def upgrade(migrate_engine): - print(__doc__) - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - - # Add the version column to the job_to_input_dataset table - try: - job_to_input_dataset_table = Table("job_to_input_dataset", metadata, autoload=True) - dataset_version_column.create(job_to_input_dataset_table) - assert dataset_version_column is job_to_input_dataset_table.c.dataset_version - except Exception: - log.exception("Adding column 'dataset_history_id' to job_to_input_dataset table failed.") - - -def downgrade(migrate_engine): - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - - # Drop the job_to_input_dataset table's version column. - try: - job_to_input_dataset_table = Table("job_to_input_dataset", metadata, autoload=True) - dataset_version_column = job_to_input_dataset_table.c.dataset_version - dataset_version_column.drop() - except Exception: - log.exception("Dropping 'dataset_version' column from job_to_input_dataset table failed.") diff --git a/lib/galaxy/model/migrate/versions/0141_add_oidc_tables.py b/lib/galaxy/model/migrate/versions/0141_add_oidc_tables.py deleted file mode 100644 index 0bdc1a0cbe7f..000000000000 --- a/lib/galaxy/model/migrate/versions/0141_add_oidc_tables.py +++ /dev/null @@ -1,104 +0,0 @@ -""" -Migration script to add a new tables for an OpenID Connect authentication and authorization. 
-""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, - VARCHAR, -) - -from galaxy.model.custom_types import JSONType - -log = logging.getLogger(__name__) -metadata = MetaData() - -psa_association = Table( - "psa_association", - metadata, - Column("id", Integer, primary_key=True), - Column("server_url", VARCHAR(255)), - Column("handle", VARCHAR(255)), - Column("secret", VARCHAR(255)), - Column("issued", Integer), - Column("lifetime", Integer), - Column("assoc_type", VARCHAR(64)), -) - - -psa_code = Table( - "psa_code", - metadata, - Column("id", Integer, primary_key=True), - Column("email", VARCHAR(200)), - Column("code", VARCHAR(32)), -) - - -psa_nonce = Table( - "psa_nonce", - metadata, - Column("id", Integer, primary_key=True), - Column("server_url", VARCHAR(255)), - Column("timestamp", Integer), - Column("salt", VARCHAR(40)), -) - - -psa_partial = Table( - "psa_partial", - metadata, - Column("id", Integer, primary_key=True), - Column("token", VARCHAR(32)), - Column("data", TEXT), - Column("next_step", Integer), - Column("backend", VARCHAR(32)), -) - - -oidc_user_authnz_tokens = Table( - "oidc_user_authnz_tokens", - metadata, - Column("id", Integer, primary_key=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("uid", VARCHAR(255)), - Column("provider", VARCHAR(32)), - Column("extra_data", JSONType, nullable=True), - Column("lifetime", Integer), - Column("assoc_type", VARCHAR(64)), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - try: - psa_association.create() - psa_code.create() - psa_nonce.create() - psa_partial.create() - oidc_user_authnz_tokens.create() - except Exception: - log.exception("Creating OIDC table failed") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - try: - psa_association.drop() - psa_code.drop() - psa_nonce.drop() - psa_partial.drop() - oidc_user_authnz_tokens.drop() - except Exception: - log.exception("Dropping OIDC table failed") diff --git a/lib/galaxy/model/migrate/versions/0142_change_numeric_metric_precision.py b/lib/galaxy/model/migrate/versions/0142_change_numeric_metric_precision.py deleted file mode 100644 index 4941abd84041..000000000000 --- a/lib/galaxy/model/migrate/versions/0142_change_numeric_metric_precision.py +++ /dev/null @@ -1,33 +0,0 @@ -""" -Migration script to change the 'value' column of 'user_preference' table from numeric(22, 7) to numeric(26, 7) -""" - -import logging - -from sqlalchemy import ( - MetaData, - Numeric, - Table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - try: - t = Table("job_metric_numeric", metadata, autoload=True) - t.c.metric_value.alter(type=Numeric(26, 7)) - t = Table("task_metric_numeric", metadata, autoload=True) - t.c.metric_value.alter(type=Numeric(26, 7)) - except Exception: - log.exception("Modifying numeric column failed") - - -def downgrade(migrate_engine): - # truncating columns would require truncating data in those columns, so it's best not to downgrade them - pass diff --git a/lib/galaxy/model/migrate/versions/0143_add_cloudauthz_tables.py b/lib/galaxy/model/migrate/versions/0143_add_cloudauthz_tables.py deleted file mode 100644 index fc5b3ae3750c..000000000000 --- a/lib/galaxy/model/migrate/versions/0143_add_cloudauthz_tables.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Migration 
script to add a new tables for CloudAuthz (tokens required to access cloud-based resources). -""" - -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - String, - Table, - TEXT, -) - -from galaxy.model.custom_types import JSONType - -log = logging.getLogger(__name__) -metadata = MetaData() - -cloudauthz = Table( - "cloudauthz", - metadata, - Column("id", Integer, primary_key=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), - Column("provider", String(255)), - Column("config", JSONType), - Column("authn_id", Integer, ForeignKey("oidc_user_authnz_tokens.id"), index=True), - Column("tokens", JSONType), - Column("last_update", DateTime), - Column("last_activity", DateTime), - Column("description", TEXT), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - try: - cloudauthz.create() - except Exception: - log.exception("Failed to create cloudauthz table") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - try: - cloudauthz.drop() - except Exception: - log.exception("Failed to drop cloudauthz table") diff --git a/lib/galaxy/model/migrate/versions/0144_add_cleanup_event_user_table.py b/lib/galaxy/model/migrate/versions/0144_add_cleanup_event_user_table.py deleted file mode 100644 index 0e3facdbc269..000000000000 --- a/lib/galaxy/model/migrate/versions/0144_add_cleanup_event_user_table.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -Migration script to add the cleanup_event_user_association table. -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - -# New table to log cleanup events -CleanupEventUserAssociation_table = Table( - "cleanup_event_user_association", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(CleanupEventUserAssociation_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(CleanupEventUserAssociation_table) diff --git a/lib/galaxy/model/migrate/versions/0145_add_workflow_step_input.py b/lib/galaxy/model/migrate/versions/0145_add_workflow_step_input.py deleted file mode 100644 index bbcfd21b98df..000000000000 --- a/lib/galaxy/model/migrate/versions/0145_add_workflow_step_input.py +++ /dev/null @@ -1,151 +0,0 @@ -""" -Migration script for workflow step input table. 
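A type change like the numeric-precision bump in 0142 above maps onto Alembic's `op.alter_column`. A sketch of the same change (`existing_type` is supplied because dialects such as MySQL need the full column definition to alter it):

```python
import sqlalchemy as sa
from alembic import op


def upgrade():
    for table in ("job_metric_numeric", "task_metric_numeric"):
        op.alter_column(table, "metric_value", type_=sa.Numeric(26, 7), existing_type=sa.Numeric(22, 7))


def downgrade():
    # Narrowing the precision could truncate stored values, so this is
    # deliberately left irreversible, as in the original script.
    pass
```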
-""" - -import logging - -from migrate import ForeignKeyConstraint as MigrateForeignKeyConstraint -from sqlalchemy import ( - Boolean, - Column, - ForeignKey, - Index, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - create_table, - drop_index, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -WorkflowStepInput_table = Table( - "workflow_step_input", - metadata, - Column("id", Integer, primary_key=True), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True), - Column("name", TEXT), - Column("merge_type", TEXT), - Column("scatter_type", TEXT), - Column("value_from", JSONType), - Column("value_from_type", TEXT), - Column("default_value", JSONType), - Column("default_value_set", Boolean, default=False), - Column("runtime_value", Boolean, default=False), - Index( - "ix_workflow_step_input_workflow_step_id_name_unique", - "workflow_step_id", - "name", - unique=True, - mysql_length={"name": 200}, - ), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - OldWorkflowStepConnection_table = Table("workflow_step_connection", metadata, autoload=True) - for fkc in OldWorkflowStepConnection_table.foreign_key_constraints: - mfkc = MigrateForeignKeyConstraint( - [_.parent for _ in fkc.elements], [_.column for _ in fkc.elements], name=fkc.name - ) - try: - mfkc.drop() - except Exception: - log.exception( - "Dropping foreign key constraint '%s' from table '%s' failed", - mfkc.name, - OldWorkflowStepConnection_table, - ) - - for index in OldWorkflowStepConnection_table.indexes: - drop_index(index, OldWorkflowStepConnection_table) - OldWorkflowStepConnection_table.rename("workflow_step_connection_preupgrade145") - # Try to deregister that table to work around some caching problems it seems. 
- OldWorkflowStepConnection_table.deregister() - metadata._remove_table("workflow_step_connection", metadata.schema) - metadata.reflect() - - NewWorkflowStepConnection_table = Table( - "workflow_step_connection", - metadata, - Column("id", Integer, primary_key=True), - Column("output_step_id", Integer, ForeignKey("workflow_step.id"), index=True), - Column("input_step_input_id", Integer, ForeignKey("workflow_step_input.id"), index=True), - Column("output_name", TEXT), - Column("input_subworkflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True), - ) - for table in (WorkflowStepInput_table, NewWorkflowStepConnection_table): - create_table(table) - - insert_step_inputs_cmd = ( - "INSERT INTO workflow_step_input (workflow_step_id, name) " - + "SELECT DISTINCT input_step_id, input_name FROM workflow_step_connection_preupgrade145" - ) - migrate_engine.execute(insert_step_inputs_cmd) - - insert_step_connections_cmd = ( - "INSERT INTO workflow_step_connection (output_step_id, input_step_input_id, output_name, input_subworkflow_step_id) " - + "SELECT wsc.output_step_id, wsi.id, wsc.output_name, wsc.input_subworkflow_step_id " - + "FROM workflow_step_connection_preupgrade145 AS wsc JOIN workflow_step_input AS wsi ON wsc.input_step_id = wsi.workflow_step_id AND wsc.input_name = wsi.name ORDER BY wsc.id" - ) - migrate_engine.execute(insert_step_connections_cmd) - drop_table(OldWorkflowStepConnection_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - - NewWorkflowStepConnection_table = Table("workflow_step_connection", metadata, autoload=True) - for fkc in NewWorkflowStepConnection_table.foreign_key_constraints: - mfkc = MigrateForeignKeyConstraint( - [_.parent for _ in fkc.elements], [_.column for _ in fkc.elements], name=fkc.name - ) - try: - mfkc.drop() - except Exception: - log.exception( - "Dropping foreign key constraint '%s' from table '%s' failed", - mfkc.name, - NewWorkflowStepConnection_table, - ) - - for index in NewWorkflowStepConnection_table.indexes: - drop_index(index, NewWorkflowStepConnection_table) - NewWorkflowStepConnection_table.rename("workflow_step_connection_predowngrade145") - # Try to deregister that table to work around some caching problems it seems. 
- NewWorkflowStepConnection_table.deregister() - metadata._remove_table("workflow_step_connection", metadata.schema) - metadata.reflect() - - OldWorkflowStepConnection_table = Table( - "workflow_step_connection", - metadata, - Column("id", Integer, primary_key=True), - Column("output_step_id", Integer, ForeignKey("workflow_step.id"), index=True), - Column("input_step_id", Integer, ForeignKey("workflow_step.id"), index=True), - Column("output_name", TEXT), - Column("input_name", TEXT), - Column("input_subworkflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True), - ) - create_table(OldWorkflowStepConnection_table) - - insert_step_connections_cmd = ( - "INSERT INTO workflow_step_connection (output_step_id, input_step_id, output_name, input_name, input_subworkflow_step_id) " - + "SELECT wsc.output_step_id, wsi.workflow_step_id, wsc.output_name, wsi.name, wsc.input_subworkflow_step_id " - + "FROM workflow_step_connection_predowngrade145 AS wsc JOIN workflow_step_input AS wsi ON wsc.input_step_input_id = wsi.id ORDER BY wsc.id" - ) - migrate_engine.execute(insert_step_connections_cmd) - - for table in (NewWorkflowStepConnection_table, WorkflowStepInput_table): - drop_table(table) diff --git a/lib/galaxy/model/migrate/versions/0146_workflow_paths.py b/lib/galaxy/model/migrate/versions/0146_workflow_paths.py deleted file mode 100644 index 5c267337377c..000000000000 --- a/lib/galaxy/model/migrate/versions/0146_workflow_paths.py +++ /dev/null @@ -1,34 +0,0 @@ -""" -Migration script for workflow paths. -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - TEXT, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - from_path_column = Column("from_path", TEXT) - add_column(from_path_column, "stored_workflow", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - - drop_column("from_path", "stored_workflow", metadata) diff --git a/lib/galaxy/model/migrate/versions/0147_job_messages.py b/lib/galaxy/model/migrate/versions/0147_job_messages.py deleted file mode 100644 index 74ea07dfe740..000000000000 --- a/lib/galaxy/model/migrate/versions/0147_job_messages.py +++ /dev/null @@ -1,62 +0,0 @@ -""" -Add structured failure reason column to jobs table -""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, - Table, - TEXT, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - add_column, - alter_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - jobs_table = Table("job", metadata, autoload=True) - job_messages_column = Column("job_messages", JSONType, nullable=True) - add_column(job_messages_column, jobs_table, metadata) - job_job_stdout_column = Column("job_stdout", TEXT, nullable=True) - add_column(job_job_stdout_column, jobs_table, metadata) - job_job_stderr_column = Column("job_stderr", TEXT, nullable=True) - add_column(job_job_stderr_column, jobs_table, metadata) - - tasks_table = Table("task", metadata, autoload=True) - task_job_messages_column = Column("job_messages", JSONType, nullable=True) - add_column(task_job_messages_column, tasks_table, metadata) - task_job_stdout_column = Column("job_stdout", TEXT, nullable=True) - 
add_column(task_job_stdout_column, tasks_table, metadata) - task_job_stderr_column = Column("job_stderr", TEXT, nullable=True) - add_column(task_job_stderr_column, tasks_table, metadata) - - for table in [jobs_table, tasks_table]: - alter_column("stdout", table, name="tool_stdout") - alter_column("stderr", table, name="tool_stderr") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - jobs_table = Table("job", metadata, autoload=True) - tasks_table = Table("task", metadata, autoload=True) - for colname in ["job_messages", "job_stdout", "job_stderr"]: - drop_column(colname, jobs_table) - drop_column(colname, tasks_table) - for table in [jobs_table, tasks_table]: - alter_column("tool_stdout", table, name="stdout") - alter_column("tool_stderr", table, name="stderr") diff --git a/lib/galaxy/model/migrate/versions/0148_add_checksum_table.py b/lib/galaxy/model/migrate/versions/0148_add_checksum_table.py deleted file mode 100644 index 7d865633f622..000000000000 --- a/lib/galaxy/model/migrate/versions/0148_add_checksum_table.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -Migration script to add dataset source and hash tables. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -dataset_source_table = Table( - "dataset_source", - metadata, - Column("id", Integer, primary_key=True), - Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True), - Column("source_uri", TEXT), - Column("extra_files_path", TEXT), - Column("transform", JSONType), -) - -dataset_hash_table = Table( - "dataset_hash", - metadata, - Column("id", Integer, primary_key=True), - Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True), - Column("hash_function", TEXT), - Column("hash_value", TEXT), - Column("extra_files_path", TEXT), -) - -dataset_source_hash_table = Table( - "dataset_source_hash", - metadata, - Column("id", Integer, primary_key=True), - Column("dataset_source_id", Integer, ForeignKey("dataset_source.id"), index=True), - Column("hash_function", TEXT), - Column("hash_value", TEXT), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - create_table(dataset_source_table) - create_table(dataset_hash_table) - create_table(dataset_source_hash_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - drop_table(dataset_source_hash_table) - drop_table(dataset_hash_table) - drop_table(dataset_source_table) diff --git a/lib/galaxy/model/migrate/versions/0149_dynamic_tools.py b/lib/galaxy/model/migrate/versions/0149_dynamic_tools.py deleted file mode 100644 index e1d4006fe58b..000000000000 --- a/lib/galaxy/model/migrate/versions/0149_dynamic_tools.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -Migration script to add the dynamic_tool table. 
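The stdout/stderr renames in 0147 above rely on Migrate's `alter_column` helper; the Alembic equivalent is `op.alter_column` with `new_column_name`. A sketch for one of the two tables:

```python
import sqlalchemy as sa
from alembic import op


def upgrade():
    op.alter_column("job", "stdout", new_column_name="tool_stdout", existing_type=sa.TEXT)
    op.alter_column("job", "stderr", new_column_name="tool_stderr", existing_type=sa.TEXT)


def downgrade():
    op.alter_column("job", "tool_stdout", new_column_name="stdout", existing_type=sa.TEXT)
    op.alter_column("job", "tool_stderr", new_column_name="stderr", existing_type=sa.TEXT)
```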
-""" -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - Unicode, -) - -from galaxy.model.custom_types import ( - JSONType, - UUIDType, -) -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - - -DynamicTool_table = Table( - "dynamic_tool", - metadata, - Column("id", Integer, primary_key=True), - Column("uuid", UUIDType()), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("tool_id", Unicode(255)), - Column("tool_version", Unicode(255)), - Column("tool_format", Unicode(255)), - Column("tool_path", Unicode(255)), - Column("tool_directory", Unicode(255)), - Column("hidden", Boolean), - Column("active", Boolean), - Column("value", JSONType), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(DynamicTool_table) - - workflow_dynamic_tool_id_column = Column("dynamic_tool_id", Integer, ForeignKey("dynamic_tool.id"), nullable=True) - add_column(workflow_dynamic_tool_id_column, "workflow_step", metadata) - job_workflow_dynamic_tool_id_column = Column( - "dynamic_tool_id", Integer, ForeignKey("dynamic_tool.id"), nullable=True - ) - add_column(job_workflow_dynamic_tool_id_column, "job", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("dynamic_tool_id", "workflow_step", metadata) - drop_column("dynamic_tool_id", "job", metadata) - drop_table(DynamicTool_table) diff --git a/lib/galaxy/model/migrate/versions/0150_add_create_time_field_for_cloudauthz.py b/lib/galaxy/model/migrate/versions/0150_add_create_time_field_for_cloudauthz.py deleted file mode 100644 index 60fed37e4a92..000000000000 --- a/lib/galaxy/model/migrate/versions/0150_add_create_time_field_for_cloudauthz.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -Adds `create_time` columns to cloudauthz table. 
-""" - - -import logging - -from sqlalchemy import ( - Column, - DateTime, - MetaData, - Table, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - cloudauthz_table = Table("cloudauthz", metadata, autoload=True) - create_time_column = Column("create_time", DateTime) - add_column(create_time_column, cloudauthz_table, metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - cloudauthz_table = Table("cloudauthz", metadata, autoload=True) - drop_column("create_time", cloudauthz_table) diff --git a/lib/galaxy/model/migrate/versions/0151_add_worker_process.py b/lib/galaxy/model/migrate/versions/0151_add_worker_process.py deleted file mode 100644 index 4a024ebbe619..000000000000 --- a/lib/galaxy/model/migrate/versions/0151_add_worker_process.py +++ /dev/null @@ -1,50 +0,0 @@ -""" -Add table for worker processes -""" - -import logging - -from sqlalchemy import ( - Column, - DateTime, - Integer, - MetaData, - String, - Table, - UniqueConstraint, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) -from galaxy.model.orm.now import now - -log = logging.getLogger(__name__) -metadata = MetaData() - - -WorkerProcess_table = Table( - "worker_process", - metadata, - Column("id", Integer, primary_key=True), - Column("server_name", String(255), index=True), - Column("hostname", String(255)), - Column("update_time", DateTime, default=now, onupdate=now), - UniqueConstraint("server_name", "hostname"), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(WorkerProcess_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(WorkerProcess_table) diff --git a/lib/galaxy/model/migrate/versions/0152_add_metadata_file_uuid.py b/lib/galaxy/model/migrate/versions/0152_add_metadata_file_uuid.py deleted file mode 100644 index 3e2e3c3ee73e..000000000000 --- a/lib/galaxy/model/migrate/versions/0152_add_metadata_file_uuid.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -Adds `uuid` column to MetadataFile table. -""" - - -import logging - -from sqlalchemy import ( - Column, - MetaData, -) - -from galaxy.model.custom_types import UUIDType -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) - - -def upgrade(migrate_engine): - print(__doc__) - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - - uuid_column = Column("uuid", UUIDType()) - add_column(uuid_column, "metadata_file", metadata) - - -def downgrade(migrate_engine): - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("uuid", "metadata_file", metadata) diff --git a/lib/galaxy/model/migrate/versions/0153_add_custos_authnz_token_table.py b/lib/galaxy/model/migrate/versions/0153_add_custos_authnz_token_table.py deleted file mode 100644 index fa6de685a006..000000000000 --- a/lib/galaxy/model/migrate/versions/0153_add_custos_authnz_token_table.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Migration for adding custos_authnz_token table. 
-""" - -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - String, - Table, - Text, - UniqueConstraint, -) - -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -CustosAuthnzToken_table = Table( - "custos_authnz_token", - metadata, - Column("id", Integer, primary_key=True), - Column("user_id", Integer, ForeignKey("galaxy_user.id")), - Column("external_user_id", String(64)), - Column("provider", String(255)), - Column("access_token", Text), - Column("id_token", Text), - Column("refresh_token", Text), - Column("expiration_time", DateTime), - Column("refresh_expiration_time", DateTime), - UniqueConstraint("user_id", "external_user_id", "provider"), - UniqueConstraint("external_user_id", "provider"), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - create_table(CustosAuthnzToken_table) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(CustosAuthnzToken_table) diff --git a/lib/galaxy/model/migrate/versions/0154_created_from_basename.py b/lib/galaxy/model/migrate/versions/0154_created_from_basename.py deleted file mode 100644 index d998d45adf67..000000000000 --- a/lib/galaxy/model/migrate/versions/0154_created_from_basename.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Adds created_from_basename to dataset. -""" -import datetime -import logging - -from sqlalchemy import ( - Column, - MetaData, - TEXT, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - created_from_basename_column = Column("created_from_basename", TEXT, default=None) - add_column(created_from_basename_column, "dataset", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("created_from_basename", "dataset", metadata) diff --git a/lib/galaxy/model/migrate/versions/0155_job_galaxy_version.py b/lib/galaxy/model/migrate/versions/0155_job_galaxy_version.py deleted file mode 100644 index 78b81fcf0c25..000000000000 --- a/lib/galaxy/model/migrate/versions/0155_job_galaxy_version.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -Add 'galaxy_version' attribute to Job table. -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - MetaData, - String, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - created_from_basename_column = Column("galaxy_version", String(64), default=None) - add_column(created_from_basename_column, "job", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("galaxy_version", "job", metadata) diff --git a/lib/galaxy/model/migrate/versions/0156_add_interactivetools.py b/lib/galaxy/model/migrate/versions/0156_add_interactivetools.py deleted file mode 100644 index 23af62a7978e..000000000000 --- a/lib/galaxy/model/migrate/versions/0156_add_interactivetools.py +++ /dev/null @@ -1,84 +0,0 @@ -""" -Migration script to add new tables for InteractiveTools. 
-""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.orm.now import now - -log = logging.getLogger(__name__) -metadata = MetaData() - -interactivetool_entry_point = Table( - "interactivetool_entry_point", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("name", TEXT), - Column("token", TEXT), - Column("tool_port", Integer), - Column("host", TEXT), - Column("port", Integer), - Column("protocol", TEXT), - Column("entry_url", TEXT), - Column("info", JSONType, nullable=True), - Column("configured", Boolean, default=False), - Column("deleted", Boolean, default=False), - Column("created_time", DateTime, default=now), - Column("modified_time", DateTime, default=now, onupdate=now), -) - -job_container_association = Table( - "job_container_association", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("container_type", TEXT), - Column("container_name", TEXT), - Column("container_info", JSONType, nullable=True), - Column("created_time", DateTime, default=now), - Column("modified_time", DateTime, default=now, onupdate=now), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - try: - job_container_association.create() - except Exception: - log.exception("Failed to create job_container_association table") - - try: - interactivetool_entry_point.create() - except Exception: - log.exception("Failed to create interactivetool_entry_point table") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - try: - job_container_association.drop() - except Exception: - log.exception("Failed to drop job_container_association table") - - try: - interactivetool_entry_point.drop() - except Exception: - log.exception("Failed to drop interactivetool_entry_point table") diff --git a/lib/galaxy/model/migrate/versions/0157_rework_dataset_validation.py b/lib/galaxy/model/migrate/versions/0157_rework_dataset_validation.py deleted file mode 100644 index 9675975e7c46..000000000000 --- a/lib/galaxy/model/migrate/versions/0157_rework_dataset_validation.py +++ /dev/null @@ -1,67 +0,0 @@ -""" -Rework dataset validation in database. 
-""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, - TEXT, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, - drop_table, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -validation_error_table = Table( - "validation_error", - metadata, - Column("id", Integer, primary_key=True), - Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True), - Column("message", TrimmedString(255)), - Column("err_type", TrimmedString(64)), - Column("attributes", TEXT), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - drop_table(validation_error_table) - - history_dataset_association_table = Table("history_dataset_association", metadata, autoload=True) - library_dataset_dataset_association_table = Table("library_dataset_dataset_association", metadata, autoload=True) - for dataset_instance_table in [history_dataset_association_table, library_dataset_dataset_association_table]: - validated_state_column = Column( - "validated_state", TrimmedString(64), default="unknown", server_default="unknown", nullable=False - ) - add_column(validated_state_column, dataset_instance_table, metadata) - - validated_state_message_column = Column("validated_state_message", TEXT) - add_column(validated_state_message_column, dataset_instance_table, metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - create_table(validation_error_table) - - history_dataset_association_table = Table("history_dataset_association", metadata, autoload=True) - library_dataset_dataset_association_table = Table("library_dataset_dataset_association", metadata, autoload=True) - for dataset_instance_table in [history_dataset_association_table, library_dataset_dataset_association_table]: - drop_column("validated_state", dataset_instance_table, metadata) - drop_column("validated_state_message", dataset_instance_table, metadata) diff --git a/lib/galaxy/model/migrate/versions/0158_workflow_reports.py b/lib/galaxy/model/migrate/versions/0158_workflow_reports.py deleted file mode 100644 index 212c5368f669..000000000000 --- a/lib/galaxy/model/migrate/versions/0158_workflow_reports.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Adds reports_config to workflow. -""" -import datetime -import logging - -from sqlalchemy import ( - Column, - MetaData, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - reports_config_column = Column("reports_config", JSONType, default=None) - add_column(reports_config_column, "workflow", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("reports_config", "workflow", metadata) diff --git a/lib/galaxy/model/migrate/versions/0159_add_job_external_id_index.py b/lib/galaxy/model/migrate/versions/0159_add_job_external_id_index.py deleted file mode 100644 index d68e18ae5fd3..000000000000 --- a/lib/galaxy/model/migrate/versions/0159_add_job_external_id_index.py +++ /dev/null @@ -1,28 +0,0 @@ -""" -Add index for job runner external ID. 
-""" - -import logging - -from sqlalchemy import MetaData - -from galaxy.model.migrate.versions.util import ( - add_index, - drop_index, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - add_index("ix_job_job_runner_external_id", "job", "job_runner_external_id", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - drop_index("ix_job_job_runner_external_id", "job", "job_runner_external_id", metadata) diff --git a/lib/galaxy/model/migrate/versions/0160_hda_set_deleted_if_purged_again.py b/lib/galaxy/model/migrate/versions/0160_hda_set_deleted_if_purged_again.py deleted file mode 100644 index 7969c9e4c5ae..000000000000 --- a/lib/galaxy/model/migrate/versions/0160_hda_set_deleted_if_purged_again.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -Migration script to set the 'deleted' column of the -'history_dataset_association' table to True if 'purged' is True. -""" - -import logging - -from galaxy.model.migrate.versions.util import engine_true - -log = logging.getLogger(__name__) - - -def upgrade(migrate_engine): - print(__doc__) - cmd = f"UPDATE history_dataset_association SET deleted={engine_true(migrate_engine)} WHERE purged AND NOT deleted;" - try: - migrate_engine.execute(cmd) - except Exception: - log.exception("Exception executing SQL command: %s", cmd) - - -def downgrade(migrate_engine): - pass diff --git a/lib/galaxy/model/migrate/versions/0161_add_workflow_invocation_output_table.py b/lib/galaxy/model/migrate/versions/0161_add_workflow_invocation_output_table.py deleted file mode 100644 index c60c103ec9bd..000000000000 --- a/lib/galaxy/model/migrate/versions/0161_add_workflow_invocation_output_table.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -Migration script to add a new workflow_invocation_output_parameter table to track output parameters. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, -) - -from galaxy.model.custom_types import JSONType - -log = logging.getLogger(__name__) -metadata = MetaData() - -workflow_invocation_output_parameter_table = Table( - "workflow_invocation_output_value", - metadata, - Column("id", Integer, primary_key=True), - Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True), - Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")), - Column("workflow_output_id", Integer, ForeignKey("workflow_output.id"), index=True), - Column("value", JSONType), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - try: - workflow_invocation_output_parameter_table.create() - except Exception: - log.exception("Creating workflow_invocation_output_parameter table failed") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - try: - workflow_invocation_output_parameter_table.drop() - except Exception: - log.exception("Dropping workflow_invocation_output_parameter table failed") diff --git a/lib/galaxy/model/migrate/versions/0162_job_only_pjas.py b/lib/galaxy/model/migrate/versions/0162_job_only_pjas.py deleted file mode 100644 index 97dcdc3b1616..000000000000 --- a/lib/galaxy/model/migrate/versions/0162_job_only_pjas.py +++ /dev/null @@ -1,31 +0,0 @@ -""" -Migration script to allow null workflow_step for PostJobActions. -This enables using PJAs with individual job executions. 
-""" - -import logging - -from sqlalchemy import MetaData - -from galaxy.model.migrate.versions.util import alter_column - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # Make workflow_step_id nullable to allow for PJAs to be created for - # individual jobs. - alter_column("workflow_step_id", "post_job_action", metadata, nullable=True) - - -def downgrade(migrate_engine): - # This is not a reversible migration, because post-migrate we may introduce - # null values to the column which cannot later be easily 'fixed'. They - # should not cause any issue to simply ignore, though -- I don't think - # there was really a great reason this was non-nullable when I first wrote it. - pass diff --git a/lib/galaxy/model/migrate/versions/0163_worker_process_pid.py b/lib/galaxy/model/migrate/versions/0163_worker_process_pid.py deleted file mode 100644 index 7ae8578cba3a..000000000000 --- a/lib/galaxy/model/migrate/versions/0163_worker_process_pid.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -Adds `pid` column to worker_process table. -""" - - -import logging - -from sqlalchemy import ( - Column, - Integer, - MetaData, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) - - -def upgrade(migrate_engine): - print(__doc__) - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - - pid_column = Column("pid", Integer) - add_column(pid_column, "worker_process", metadata) - - -def downgrade(migrate_engine): - metadata = MetaData() - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("pid", "worker_process", metadata) diff --git a/lib/galaxy/model/migrate/versions/0164_page_format.py b/lib/galaxy/model/migrate/versions/0164_page_format.py deleted file mode 100644 index 5c791732e7e0..000000000000 --- a/lib/galaxy/model/migrate/versions/0164_page_format.py +++ /dev/null @@ -1,38 +0,0 @@ -""" -Adds page content format. -""" -import datetime -import logging - -from sqlalchemy import ( - Column, - MetaData, -) - -from galaxy.model.custom_types import TrimmedString -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - content_format_column = Column( - "content_format", TrimmedString(32), default="html", server_default="html", nullable=False - ) - add_column(content_format_column, "page_revision", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("content_format", "page_revision", metadata) diff --git a/lib/galaxy/model/migrate/versions/0165_add_content_update_time.py b/lib/galaxy/model/migrate/versions/0165_add_content_update_time.py deleted file mode 100644 index cb369939ef31..000000000000 --- a/lib/galaxy/model/migrate/versions/0165_add_content_update_time.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -Adds timestamps to hdca table. Adds triggers to dataset, hda, hdca tables -to update history.update_time when contents are changed. 
-""" - -import logging - -from sqlalchemy import ( - Column, - DateTime, - MetaData, - Table, -) - -from galaxy.model.migrate.triggers.history_update_time_field import ( - drop_timestamp_triggers, - install_timestamp_triggers, -) -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) -from galaxy.model.orm.now import now - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - create_timestamps(metadata, "history_dataset_collection_association") - install_timestamp_triggers(migrate_engine) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - drop_timestamp_triggers(migrate_engine) - drop_timestamps(metadata, "history_dataset_collection_association") - - -def create_timestamps(metadata, table_name): - target_table = Table(table_name, metadata, autoload=True) - if "create_time" not in target_table.c: - create_time_column = Column("create_time", DateTime, default=now) - add_column(create_time_column, target_table, metadata) - if "update_time" not in target_table.c: - update_time_column = Column("update_time", DateTime, default=now, onupdate=now) - add_column(update_time_column, target_table, metadata) - - -def drop_timestamps(metadata, table_name): - target_table = Table(table_name, metadata, autoload=True) - drop_column("create_time", target_table) - drop_column("update_time", target_table) diff --git a/lib/galaxy/model/migrate/versions/0166_job_state_summary_view.py b/lib/galaxy/model/migrate/versions/0166_job_state_summary_view.py deleted file mode 100644 index 40b986408c6a..000000000000 --- a/lib/galaxy/model/migrate/versions/0166_job_state_summary_view.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -Job state trigger syncs update_time in hdca table. -Add job-state-summary view for hdca elements -""" - -import logging - -from galaxy.model.view import HistoryDatasetCollectionJobStateSummary -from galaxy.model.view.utils import ( - CreateView, - DropView, -) - -log = logging.getLogger(__name__) - - -def upgrade(migrate_engine): - print(__doc__) - # drop first because sqlite does not support or_replace - downgrade(migrate_engine) - view = HistoryDatasetCollectionJobStateSummary - create_view = CreateView(view.name, view.__view__) - # print(str(create_view.compile(migrate_engine))) - migrate_engine.execute(create_view) - - -def downgrade(migrate_engine): - drop_view = DropView(HistoryDatasetCollectionJobStateSummary.name) - # print(str(drop_view.compile(migrate_engine))) - migrate_engine.execute(drop_view) diff --git a/lib/galaxy/model/migrate/versions/0167_add_job_to_input_dataset_collection_element_association.py b/lib/galaxy/model/migrate/versions/0167_add_job_to_input_dataset_collection_element_association.py deleted file mode 100644 index 681e10309598..000000000000 --- a/lib/galaxy/model/migrate/versions/0167_add_job_to_input_dataset_collection_element_association.py +++ /dev/null @@ -1,47 +0,0 @@ -""" -Migration script to add a new job_to_input_dataset_collection_element table to track job inputs. 
-""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, - Table, - Unicode, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -job_to_input_dataset_collection_element_table = Table( - "job_to_input_dataset_collection_element", - metadata, - Column("id", Integer, primary_key=True), - Column("job_id", Integer, ForeignKey("job.id"), index=True), - Column("dataset_collection_element_id", Integer, ForeignKey("dataset_collection_element.id"), index=True), - Column("name", Unicode(255)), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - try: - job_to_input_dataset_collection_element_table.create() - except Exception: - log.exception("Creating job_to_input_dataset_collection_element table failed") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - try: - job_to_input_dataset_collection_element_table.drop() - except Exception: - log.exception("Dropping job_to_input_dataset_collection_element table failed") diff --git a/lib/galaxy/model/migrate/versions/0168_stored_workflow_hidden_col.py b/lib/galaxy/model/migrate/versions/0168_stored_workflow_hidden_col.py deleted file mode 100644 index 5405c49bd90f..000000000000 --- a/lib/galaxy/model/migrate/versions/0168_stored_workflow_hidden_col.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -Migration script to add a 'hidden' column to the 'StoredWorkflow' table. -""" - -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -# Column to add. -hidden_col = Column("hidden", Boolean, default=False) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - add_column(hidden_col, "stored_workflow", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("hidden", "stored_workflow", metadata) diff --git a/lib/galaxy/model/migrate/versions/0169_add_missing_indexes.py b/lib/galaxy/model/migrate/versions/0169_add_missing_indexes.py deleted file mode 100644 index b3da46c043eb..000000000000 --- a/lib/galaxy/model/migrate/versions/0169_add_missing_indexes.py +++ /dev/null @@ -1,66 +0,0 @@ -""" -Migration script to create missing indexes. Adding new columns to existing tables via SQLAlchemy does not create the index, even if the column definition includes index=True. 
-""" - -import logging - -from sqlalchemy import MetaData - -from galaxy.model.migrate.versions.util import ( - add_index, - drop_index, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -indexes = [ - ["ix_galaxy_user_activation_token", "galaxy_user", "activation_token"], - ["ix_workflow_step_dynamic_tool_id", "workflow_step", "dynamic_tool_id"], - ["ix_history_dataset_association_version", "history_dataset_association", "version"], - ["ix_workflow_invocation_scheduler", "workflow_invocation", "scheduler"], - ["ix_page_slug", "page", "slug"], - ["ix_workflow_invocation_state", "workflow_invocation", "state"], - [ - "ix_history_dataset_collection_association_implicit_collection_jobs_id", - "history_dataset_collection_association", - "implicit_collection_jobs_id", - ], - ["ix_workflow_step_subworkflow_id", "workflow_step", "subworkflow_id"], - ["ix_dynamic_tool_update_time", "dynamic_tool", "update_time"], - [ - "ix_library_dataset_dataset_association_extended_metadata_id", - "library_dataset_dataset_association", - "extended_metadata_id", - ], - [ - "ix_workflow_invocation_step_implicit_collection_jobs_id", - "workflow_invocation_step", - "implicit_collection_jobs_id", - ], - ["ix_workflow_invocation_step_state", "workflow_invocation_step", "state"], - ["ix_workflow_invocation_history_id", "workflow_invocation", "history_id"], - ["ix_workflow_parent_workflow_id", "workflow", "parent_workflow_id"], - ["ix_metadata_file_uuid", "metadata_file", "uuid"], - ["ix_history_dataset_collection_association_job_id", "history_dataset_collection_association", "job_id"], - ["ix_galaxy_user_active", "galaxy_user", "active"], - ["ix_job_dynamic_tool_id", "job", "dynamic_tool_id"], - ["ix_history_dataset_association_extended_metadata_id", "history_dataset_association", "extended_metadata_id"], -] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for ix, table, col in indexes: - add_index(ix, table, col, metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - for ix, table, col in indexes: - drop_index(ix, table, col, metadata) diff --git a/lib/galaxy/model/migrate/versions/0170_add_more_missing_indexes.py b/lib/galaxy/model/migrate/versions/0170_add_more_missing_indexes.py deleted file mode 100644 index f2e8f331205a..000000000000 --- a/lib/galaxy/model/migrate/versions/0170_add_more_missing_indexes.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Migration script to create missing indexes. Adding new columns to existing tables via SQLAlchemy does not create the index, even if the column definition includes index=True. 
-""" - -import logging - -from sqlalchemy import MetaData - -from galaxy.model.migrate.versions.util import ( - add_index, - drop_index, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -indexes = [ - [ - "ix_workflow_invocation_output_dataset_association_workflow_output_id", - "workflow_invocation_output_dataset_association", - "workflow_output_id", - ], - [ - "ix_workflow_invocation_output_dataset_association_workflow_step_id", - "workflow_invocation_output_dataset_association", - "workflow_step_id", - ], - [ - "ix_workflow_invocation_output_dataset_collection_association_workflow_output_id", - "workflow_invocation_output_dataset_collection_association", - "workflow_output_id", - ], - [ - "ix_workflow_invocation_output_dataset_collection_association_workflow_step_id", - "workflow_invocation_output_dataset_collection_association", - "workflow_step_id", - ], - [ - "ix_workflow_invocation_step_output_dataset_collection_association_workflow_step_id", - "workflow_invocation_step_output_dataset_collection_association", - "workflow_step_id", - ], -] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for ix, table, col in indexes: - add_index(ix, table, col, metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - for ix, table, col in indexes: - drop_index(ix, table, col, metadata) diff --git a/lib/galaxy/model/migrate/versions/0171_schemaorg_metadata.py b/lib/galaxy/model/migrate/versions/0171_schemaorg_metadata.py deleted file mode 100644 index c89af15f692e..000000000000 --- a/lib/galaxy/model/migrate/versions/0171_schemaorg_metadata.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -Adds license and creator metadata to workflow. -""" -import datetime -import logging - -from sqlalchemy import ( - Column, - MetaData, - TEXT, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -now = datetime.datetime.utcnow -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - metadata.bind = migrate_engine - print(__doc__) - metadata.reflect() - - # Add person metadata in future pass at this. - # person_metadata_column = Column('person_metadata', JSONType, default=None) - # add_column(person_metadata_column, 'galaxy_user', metadata) - - creator_metadata_column = Column("creator_metadata", JSONType, default=None) - add_column(creator_metadata_column, "workflow", metadata) - - license_column = Column("license", TEXT, default=None) - add_column(license_column, "workflow", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - # drop_column('person_metadata', 'galaxy_user', metadata) - drop_column("creator_metadata", "workflow", metadata) - drop_column("license", "workflow", metadata) diff --git a/lib/galaxy/model/migrate/versions/0172_it_entrypoint_requires_domain.py b/lib/galaxy/model/migrate/versions/0172_it_entrypoint_requires_domain.py deleted file mode 100644 index 6cb997b40052..000000000000 --- a/lib/galaxy/model/migrate/versions/0172_it_entrypoint_requires_domain.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Adds requires_domain column to InteractiveTools Entry Point (interactivetool_entry_point). 
-""" -import datetime -import logging - -from sqlalchemy import ( - Boolean, - Column, - MetaData, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - requires_domain = Column("requires_domain", Boolean, default=None) - add_column(requires_domain, "interactivetool_entry_point", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("requires_domain", "interactivetool_entry_point", metadata) diff --git a/lib/galaxy/model/migrate/versions/0173_add_job_id_to_dataset.py b/lib/galaxy/model/migrate/versions/0173_add_job_id_to_dataset.py deleted file mode 100644 index 69ebfa8f067d..000000000000 --- a/lib/galaxy/model/migrate/versions/0173_add_job_id_to_dataset.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Migration script for adding job_id column to dataset table. -""" - -import logging - -from sqlalchemy import ( - Column, - ForeignKey, - Integer, - MetaData, -) - -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - job_id_column = Column("job_id", Integer, ForeignKey("job.id"), index=True) - add_column(job_id_column, "dataset", metadata, index_name="ix_dataset_job_id") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("job_id", "dataset", metadata) diff --git a/lib/galaxy/model/migrate/versions/0174_readd_update_time_triggers.py b/lib/galaxy/model/migrate/versions/0174_readd_update_time_triggers.py deleted file mode 100644 index bd1ad2931c86..000000000000 --- a/lib/galaxy/model/migrate/versions/0174_readd_update_time_triggers.py +++ /dev/null @@ -1,28 +0,0 @@ -""" -Re-add triggers to update history.update_time when contents are changed. 
-""" - -import logging - -from sqlalchemy import MetaData - -from galaxy.model.migrate.triggers.history_update_time_field import ( - drop_timestamp_triggers, - install_timestamp_triggers, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - install_timestamp_triggers(migrate_engine) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - drop_timestamp_triggers(migrate_engine) diff --git a/lib/galaxy/model/migrate/versions/0175_history_audit.py b/lib/galaxy/model/migrate/versions/0175_history_audit.py deleted file mode 100644 index ab47956bea6c..000000000000 --- a/lib/galaxy/model/migrate/versions/0175_history_audit.py +++ /dev/null @@ -1,89 +0,0 @@ -""" -Add history audit table and associated triggers -""" - -import datetime -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - PrimaryKeyConstraint, - Table, -) - -from galaxy.model.migrate.triggers import history_update_time_field as old_triggers # rollback to old ones -from galaxy.model.migrate.triggers import update_audit_table as new_triggers # install me -from galaxy.model.migrate.versions.util import ( - create_table, - drop_table, -) - -log = logging.getLogger(__name__) -now = datetime.datetime.utcnow -metadata = MetaData() - -AuditTable = Table( - "history_audit", - metadata, - Column("history_id", Integer, ForeignKey("history.id"), primary_key=True, nullable=False), - Column("update_time", DateTime, default=now, primary_key=True, nullable=False), - PrimaryKeyConstraint(sqlite_on_conflict="IGNORE"), -) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # create table + index - AuditTable.drop(migrate_engine, checkfirst=True) - create_table(AuditTable) - - # populate with update_time from every history - copy_update_times = """ - INSERT INTO history_audit (history_id, update_time) - SELECT id, update_time FROM history - """ - migrate_engine.execute(copy_update_times) - - # drop existing timestamp triggers - old_triggers.drop_timestamp_triggers(migrate_engine) - - # install new timestamp triggers - new_triggers.install(migrate_engine) - - -def downgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - # drop existing timestamp triggers - new_triggers.remove(migrate_engine) - - try: - # update history.update_time with vals from audit table - put_em_back = """ - UPDATE history h - SET update_time = a.max_update_time - FROM ( - SELECT history_id, max(update_time) as max_update_time - FROM history_audit - GROUP BY history_id - ) a - WHERE h.id = a.history_id - """ - migrate_engine.execute(put_em_back) - except Exception: - print("Unable to put update_times back") - - # drop audit table - drop_table(AuditTable) - - # install old timestamp triggers - old_triggers.install_timestamp_triggers(migrate_engine) diff --git a/lib/galaxy/model/migrate/versions/0176_add_indexes_on_update_time.py b/lib/galaxy/model/migrate/versions/0176_add_indexes_on_update_time.py deleted file mode 100644 index 12f3afe225c2..000000000000 --- a/lib/galaxy/model/migrate/versions/0176_add_indexes_on_update_time.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Migration script to add indexes on update_time columns that are frequently used in ORDER BY clauses. 
-""" - -import logging - -from sqlalchemy import MetaData - -from galaxy.model.migrate.versions.util import ( - add_index, - drop_index, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - -indexes = [ - ["ix_history_dataset_association_update_time", "history_dataset_association", "update_time"], - ["ix_library_dataset_dataset_association_update_time", "library_dataset_dataset_association", "update_time"], - ["ix_job_update_time", "job", "update_time"], - ["ix_history_dataset_collection_association_update_time", "history_dataset_collection_association", "update_time"], - ["ix_workflow_invocation_update_time", "workflow_invocation", "update_time"], - ["ix_stored_workflow_update_time", "stored_workflow", "update_time"], -] - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - for ix, table, col in indexes: - add_index(ix, table, col, metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - for ix, table, col in indexes: - drop_index(ix, table, col, metadata) diff --git a/lib/galaxy/model/migrate/versions/0177_update_job_state_summary.py b/lib/galaxy/model/migrate/versions/0177_update_job_state_summary.py deleted file mode 100644 index 7823b7512aea..000000000000 --- a/lib/galaxy/model/migrate/versions/0177_update_job_state_summary.py +++ /dev/null @@ -1,27 +0,0 @@ -""" -Update job-state-summary view for hdca elements to include job directly tied with the hdca -""" - -import logging - -from galaxy.model.view import HistoryDatasetCollectionJobStateSummary -from galaxy.model.view.utils import ( - CreateView, - DropView, -) - -log = logging.getLogger(__name__) - - -def upgrade(migrate_engine): - print(__doc__) - # drop first because sqlite does not support or_replace - downgrade(migrate_engine) - view = HistoryDatasetCollectionJobStateSummary - create_view = CreateView(view.name, view.__view__) - migrate_engine.execute(create_view) - - -def downgrade(migrate_engine): - drop_view = DropView(HistoryDatasetCollectionJobStateSummary.name) - migrate_engine.execute(drop_view) diff --git a/lib/galaxy/model/migrate/versions/0178_drop_deferredjob_table.py b/lib/galaxy/model/migrate/versions/0178_drop_deferredjob_table.py deleted file mode 100644 index 1e94a7f7505a..000000000000 --- a/lib/galaxy/model/migrate/versions/0178_drop_deferredjob_table.py +++ /dev/null @@ -1,67 +0,0 @@ -""" -Drop unused DeferredJob table and foreign key column on genome_index_tool_data. 
-""" - -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - String, - Table, - TEXT, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, - drop_table, -) -from galaxy.model.orm.now import now - -log = logging.getLogger(__name__) -metadata = MetaData() - -DeferredJob_table = Table( - "deferred_job", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("state", String(64), index=True), - Column("plugin", String(128), index=True), - Column("params", JSONType), - Column("info", TEXT), -) - -deferred_job_id = Column("deferred_job_id", Integer, ForeignKey("deferred_job.id"), index=True) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - try: - drop_column(deferred_job_id.name, "genome_index_tool_data", metadata) - drop_table(DeferredJob_table) - except Exception: - log.exception("Dropping deferred_job table failed") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - try: - create_table(DeferredJob_table) - add_column( - deferred_job_id, "genome_index_tool_data", metadata, index_name="ix_genome_index_tool_data_deferred_job_id" - ) - except Exception: - log.exception("Creating deferred_job table failed") diff --git a/lib/galaxy/model/migrate/versions/0179_drop_transferjob_table.py b/lib/galaxy/model/migrate/versions/0179_drop_transferjob_table.py deleted file mode 100644 index acd6c76744b6..000000000000 --- a/lib/galaxy/model/migrate/versions/0179_drop_transferjob_table.py +++ /dev/null @@ -1,67 +0,0 @@ -""" -Drop unused TransferJob table and foreign key column on genome_index_tool_data. 
-""" - -import logging - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - Integer, - MetaData, - String, - Table, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - add_column, - create_table, - drop_column, - drop_table, -) -from galaxy.model.orm.now import now - -log = logging.getLogger(__name__) -metadata = MetaData() - -TransferJob_table = Table( - "transfer_job", - metadata, - Column("id", Integer, primary_key=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), - Column("state", String(64), index=True), - Column("path", String(1024)), - Column("params", JSONType), - Column("pid", Integer), - Column("socket", Integer), -) - -transfer_job_id = Column("transfer_job_id", Integer, ForeignKey("transfer_job.id"), index=True) - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - try: - drop_column(transfer_job_id.name, "genome_index_tool_data", metadata) - drop_table(TransferJob_table) - except Exception: - log.exception("Dropping transfer_job table failed") - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - try: - create_table(TransferJob_table) - add_column( - transfer_job_id, "genome_index_tool_data", metadata, index_name="ix_genome_index_tool_data_transfer_job_id" - ) - except Exception: - log.exception("Creating transfer_job table failed") diff --git a/lib/galaxy/model/migrate/versions/0180_add_vault_table.py b/lib/galaxy/model/migrate/versions/0180_add_vault_table.py deleted file mode 100644 index 7f18605b1b80..000000000000 --- a/lib/galaxy/model/migrate/versions/0180_add_vault_table.py +++ /dev/null @@ -1,41 +0,0 @@ -import datetime - -from sqlalchemy import ( - Column, - DateTime, - ForeignKey, - MetaData, - Table, - Text, -) - -now = datetime.datetime.utcnow -meta = MetaData() - -vault = Table( - "vault", - meta, - Column("key", Text, primary_key=True), - Column("parent_key", Text, ForeignKey("vault.key"), index=True, nullable=True), - Column("value", Text, nullable=True), - Column("create_time", DateTime, default=now), - Column("update_time", DateTime, default=now, onupdate=now), -) - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - vault.create() - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - # This revision is not part of 21.09, but in 22.01 it will be handled by Alembic. - # It is possible to reach a state (via upgrading/downgrading) where this table - # will have been dropped by Alembic, but SQLAlchemy Migrate (our previous - # db migrations tool) will have version 180 (which includes this table). - # Downgrading from such a state would raise an error - which this code prevents. - try: - vault.drop() - except Exception: - pass diff --git a/lib/galaxy/model/migrate/versions/0181_add_source_metadata_workflow_table.py b/lib/galaxy/model/migrate/versions/0181_add_source_metadata_workflow_table.py deleted file mode 100644 index 597559728918..000000000000 --- a/lib/galaxy/model/migrate/versions/0181_add_source_metadata_workflow_table.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Migration script for adding source_metadata column to workflow table. 
-""" - -import logging - -from sqlalchemy import ( - Column, - MetaData, -) - -from galaxy.model.custom_types import JSONType -from galaxy.model.migrate.versions.util import ( - add_column, - drop_column, -) - -log = logging.getLogger(__name__) -metadata = MetaData() - - -def upgrade(migrate_engine): - print(__doc__) - metadata.bind = migrate_engine - metadata.reflect() - - source_metadata_column = Column("source_metadata", JSONType) - add_column(source_metadata_column, "workflow", metadata) - - -def downgrade(migrate_engine): - metadata.bind = migrate_engine - metadata.reflect() - - drop_column("source_metadata", "workflow", metadata) diff --git a/lib/galaxy/model/migrate/versions/__init__.py b/lib/galaxy/model/migrate/versions/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/lib/galaxy/model/migrate/versions/util.py b/lib/galaxy/model/migrate/versions/util.py deleted file mode 100644 index 5bc4ab61a722..000000000000 --- a/lib/galaxy/model/migrate/versions/util.py +++ /dev/null @@ -1,205 +0,0 @@ -import hashlib -import logging - -from sqlalchemy import ( - BLOB, - DDL, - Index, - Table, - Text, -) -from sqlalchemy.dialects.mysql import MEDIUMBLOB - -log = logging.getLogger(__name__) - - -def engine_false(migrate_engine): - if migrate_engine.name in ["postgres", "postgresql"]: - return "FALSE" - elif migrate_engine.name in ["mysql", "sqlite"]: - return 0 - else: - raise Exception(f"Unknown database type: {migrate_engine.name}") - - -def engine_true(migrate_engine): - if migrate_engine.name in ["postgres", "postgresql"]: - return "TRUE" - elif migrate_engine.name in ["mysql", "sqlite"]: - return 1 - else: - raise Exception(f"Unknown database type: {migrate_engine.name}") - - -def nextval(migrate_engine, table, col="id"): - if migrate_engine.name in ["postgres", "postgresql"]: - return f"nextval('{table}_{col}_seq')" - elif migrate_engine.name in ["mysql", "sqlite"]: - return "null" - else: - raise Exception(f"Unable to convert data for unknown database type: {migrate_engine.name}") - - -def localtimestamp(migrate_engine): - if migrate_engine.name in ["mysql", "postgres", "postgresql"]: - return "LOCALTIMESTAMP" - elif migrate_engine.name == "sqlite": - return "current_date || ' ' || current_time" - else: - raise Exception(f"Unable to convert data for unknown database type: {migrate_engine.name}") - - -def truncate_index_name(index_name, engine): - # does what sqlalchemy does, see https://github.com/sqlalchemy/sqlalchemy/blob/8455a11bcc23e97afe666873cd872b0f204848d8/lib/sqlalchemy/sql/compiler.py#L4696 - max_index_name_length = engine.dialect.max_index_name_length or engine.dialect.max_identifier_length - if len(index_name) > max_index_name_length: - suffix = hashlib.md5(index_name.encode("utf-8")).hexdigest()[-4:] - index_name = f"{index_name[0:max_index_name_length - 8]}_{suffix}" - return index_name - - -def create_table(table): - try: - table.create() - except Exception: - log.exception("Creating table '%s' failed.", table) - - -def drop_table(table, metadata=None): - """ - :param table: Table to drop - :type table: :class:`Table` or str - """ - try: - if not isinstance(table, Table): - assert metadata is not None - table = Table(table, metadata, autoload=True) - table.drop() - except Exception: - log.exception("Dropping table '%s' failed.", table) - - -def add_column(column, table, metadata, **kwds): - """ - :param table: Table to add the column to - :type table: :class:`Table` or str - - :type metadata: :class:`Metadata` - """ - try: - index_to_create 
= None - migrate_engine = metadata.bind - if not isinstance(table, Table): - table = Table(table, metadata, autoload=True) - if migrate_engine.name == "sqlite" and column.index and column.foreign_keys: - # SQLAlchemy Migrate has a bug when adding a column with both a - # ForeignKey and an index in SQLite. Since SQLite creates an index - # anyway, we can drop the explicit index creation. - # TODO: this is hacky, but it solves this^ problem. Needs better solution. - index_to_create = (kwds["index_name"], table, column.name) - del kwds["index_name"] - column.index = False - column.create(table, **kwds) - assert column is table.c[column.name] - if index_to_create: - add_index(*index_to_create) - except Exception: - log.exception("Adding column '%s' to table '%s' failed.", column, table) - - -def alter_column(column_name, table, metadata=None, **kwds): - """ - :param table: Table to alter - :type table: :class:`Table` or str - - :param metadata: Needed only if ``table`` is a table name - :type metadata: :class:`Metadata` - """ - try: - if not isinstance(table, Table): - assert metadata is not None - table = Table(table, metadata, autoload=True) - column = table.c[column_name] - column.alter(**kwds) - except Exception: - log.exception("Modifying column '%s' of table '%s' failed.", column_name, table) - - -def drop_column(column_name, table, metadata=None): - """ - :param table: Table to drop the column from - :type table: :class:`Table` or str - - :param metadata: Needed only if ``table`` is a table name - :type metadata: :class:`Metadata` - """ - try: - if not isinstance(table, Table): - assert metadata is not None - table = Table(table, metadata, autoload=True) - column = table.c[column_name] - column.drop() - except Exception: - log.exception("Dropping column '%s' from table '%s' failed.", column_name, table) - - -def add_index(index_name, table, column_name, metadata=None, **kwds): - """ - :param table: Table to add the index to - :type table: :class:`Table` or str - - :param metadata: Needed only if ``table`` is a table name - :type metadata: :class:`Metadata` - """ - try: - if not isinstance(table, Table): - assert metadata is not None - table = Table(table, metadata, autoload=True) - index_name = truncate_index_name(index_name, table.metadata.bind) - if index_name not in [ix.name for ix in table.indexes]: - column = table.c[column_name] - # MySQL cannot index a TEXT/BLOB column without specifying mysql_length - if isinstance(column.type, (BLOB, MEDIUMBLOB, Text)): - kwds.setdefault("mysql_length", 200) - index = Index(index_name, column, **kwds) - index.create() - else: - log.debug("Index '%s' on column '%s' in table '%s' already exists.", index_name, column_name, table) - except Exception: - log.exception("Adding index '%s' on column '%s' to table '%s' failed.", index_name, column_name, table) - - -def drop_index(index, table, column_name=None, metadata=None): - """ - :param index: Index to drop - :type index: :class:`Index` or str - - :param table: Table to drop the index from - :type table: :class:`Table` or str - - :param metadata: Needed only if ``table`` is a table name - :type metadata: :class:`Metadata` - """ - try: - if not isinstance(index, Index): - if not isinstance(table, Table): - assert metadata is not None - table = Table(table, metadata, autoload=True) - index_name = truncate_index_name(index, table.metadata.bind) - if index_name in [ix.name for ix in table.indexes]: - index = Index(index_name, table.c[column_name]) - else: - log.debug("Index '%s' in table '%s' does not 
exist.", index, table) - return - index.drop() - except Exception: - log.exception("Dropping index '%s' from table '%s' failed", index, table) - - -def execute_statements(engine, raw_sql): - statements = raw_sql if isinstance(raw_sql, list) else [raw_sql] - for sql in statements: - cmd = DDL(sql) - with engine.connect() as connection: - with connection.begin(): - connection.execute(cmd) diff --git a/lib/galaxy/model/migrations/README.md b/lib/galaxy/model/migrations/README.md new file mode 100644 index 000000000000..9c5bcff49f6b --- /dev/null +++ b/lib/galaxy/model/migrations/README.md @@ -0,0 +1,76 @@ +# Galaxy's Migrations System + +## Overview + +To manage its database migrations, Galaxy uses [Alembic](https://alembic.sqlalchemy.org). + +Galaxy's data model is split into the [galaxy model](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/model/__init__.py) and the [install model](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/model/tool_shed_install/__init__.py). These two models may be persisted in one combined database (which is the default) or two separate databases (which is enabled by setting the [`install_database_connection`](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/webapps/galaxy/config_schema.yml#L157) configuration option). + +To accommodate this setup, the Alembic-based migrations system uses [branches](https://alembic.sqlalchemy.org/en/latest/branches.html#working-with-branches). A branch is a versioning lineage that starts at a common base revision and represents part of Galaxy's data model. These branches are identified by labels (`gxy` for the galaxy model and `tsi` for the install model) and may share the same Alembic version table (if they share the same database; otherwise, each database has its own version table). Each branch has its own version history, represented by revision scripts located in the branch version directory (`migrations/alembic/versions_gxy` for `gxy` and `migrations/alembic/versions_tsi` for `tsi`). + +## Administering Galaxy: upgrading and downgrading the database + +To create a database or initialize an empty database, use the `create_db.sh` script. For usage and options, see the inline documentation in the file. Note that this will create and/or initialize new databases (or one combined database) for the `gxy` and `tsi` models. This script does not handle the Tool Shed database. + +To upgrade or downgrade an existing database ***that has been migrated to Alembic***, use the `run_alembic.sh` script. For usage and options, see the inline documentation in the file. This script is a thin wrapper around the Alembic runner. It takes care of adjusting the path, initializing the Python virtual environtment, retrieving necessary configuration values, and invoking Alembic with appropriate arguments. + +Since Galaxy uses branch labels to distinguish between the galaxy and the install models, in most cases, you'll need to identify the target branch to which your command should be applied. Use branch labels: `gxy` for the galaxy model, and `tsi` for install model. + +### Examples of usage: + +Remember to first backup your database(s). 
+
+#### To upgrade:
+```
+./run_alembic.sh upgrade gxy@head # upgrade gxy to head revision
+./run_alembic.sh upgrade gxy@+1 # upgrade gxy to 1 revision above current
+./run_alembic.sh upgrade [revision identifier] # upgrade branch to a specific revision
+./run_alembic.sh upgrade [revision identifier]+1 # upgrade branch to 1 revision above specific revision
+./run_alembic.sh upgrade heads # upgrade gxy and tsi to head revisions
+```
+
+#### To downgrade:
+```
+./run_alembic.sh downgrade gxy@base # downgrade gxy to base (empty db with empty alembic table)
+./run_alembic.sh downgrade gxy@-1 # downgrade gxy to 1 revision below current
+./run_alembic.sh downgrade [revision identifier] # downgrade branch to a specific revision
+./run_alembic.sh downgrade [revision identifier]-1 # downgrade branch to 1 revision below specific revision
+```
+Check [Alembic documentation](https://alembic.sqlalchemy.org/en/latest/branches.html#working-with-branches) for more examples.
+
+Note: relative upgrades and downgrades without a revision identifier are not supported - i.e., you cannot `upgrade +1` or `downgrade -1` without providing a revision identifier. However, you can upgrade both branches to their latest versions (head revisions) without providing a branch label: `upgrade heads`.
+
+### Legacy script
+
+The `manage_db.sh` script is still available, but is considered legacy. The script supports a subset of the command line options previously offered by SQLAlchemy Migrate. For usage, see the documentation in the file.
+
+
+### Upgrading from SQLAlchemy Migrate
+
+Galaxy no longer supports SQLAlchemy Migrate. To upgrade to Alembic, follow these steps:
+
+1. Back up your database(s).
+
+2. Verify that your database is at the latest SQLAlchemy Migrate version. If you have a combined database, it should be version 180 (check the `migrate_version` table). If you have separate galaxy model and install model databases, your galaxy version should be 180, and your install model version should be 17.
+
+3. If your database is not current, before upgrading to the 22.01 release or the current dev branch, run `manage_db.sh upgrade` to upgrade your database.
+Once your database has the latest SQLAlchemy Migrate version, you can check out the new code base (22.01 release or the current dev branch) and proceed to migrate to Alembic.
+
+4. If you want Alembic to upgrade your database automatically, you can set the [`database_auto_migrate`](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/webapps/galaxy/config_schema.yml#L170) configuration option and simply start Galaxy. Otherwise, use the `run_alembic.sh` script.
+
+## Developing Galaxy: creating new revisions
+
+When creating new revisions, as with upgrading/downgrading, you need to indicate the target branch. You use the same `run_alembic.sh` script; however, the syntax is slightly different:
+
+To create a revision for the galaxy model:
+```./run_alembic.sh revision --head=gxy@head -m "your description"```
+
+To create a revision for the install model:
+```./run_alembic.sh revision --head=tsi@head -m "your description"```
+
+Alembic will generate a revision script in the appropriate version directory (the location of the version directories is specified in the `alembic.ini` file). You'll need to fill out the `upgrade()` and `downgrade()` functions.
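+
+For instance, a filled-out revision script for the `gxy` branch might look like this (a minimal sketch: `e7b6dcb09efd` is the gxy base revision added in this changeset, while the revision id, table, and column names are hypothetical):
+
+```
+"""add notes column to my_table
+
+Revision ID: abc123def456
+Revises: e7b6dcb09efd
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision = "abc123def456"
+down_revision = "e7b6dcb09efd"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # Add a nullable text column to a hypothetical table.
+    op.add_column("my_table", sa.Column("notes", sa.Text(), nullable=True))
+
+
+def downgrade():
+    # Remove the column added in upgrade().
+    op.drop_column("my_table", "notes")
+```
+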
+See the Alembic documentation for more examples:
+- [https://alembic.sqlalchemy.org/en/latest/tutorial.html#create-a-migration-script](https://alembic.sqlalchemy.org/en/latest/tutorial.html#create-a-migration-script)
+- [https://alembic.sqlalchemy.org/en/latest/ops.html](https://alembic.sqlalchemy.org/en/latest/ops.html)
+
+After that, you may run the upgrade script: `run_alembic.sh upgrade heads`. And you're done!
+(*NOTE: This step should be taken after updating the model and adding appropriate tests.*)
diff --git a/lib/galaxy/model/migrations/__init__.py b/lib/galaxy/model/migrations/__init__.py
new file mode 100644
index 000000000000..85696b352a1b
--- /dev/null
+++ b/lib/galaxy/model/migrations/__init__.py
@@ -0,0 +1,516 @@
+import logging
+import os
+from typing import (
+    cast,
+    Dict,
+    Iterable,
+    NamedTuple,
+    NewType,
+    NoReturn,
+    Optional,
+    Union,
+)
+
+import alembic
+from alembic import command
+from alembic.config import Config
+from alembic.runtime.migration import MigrationContext
+from alembic.script import ScriptDirectory
+from alembic.script.base import Script
+from sqlalchemy import (
+    create_engine,
+    MetaData,
+    Table,
+)
+from sqlalchemy.engine import (
+    Connection,
+    CursorResult,
+    Engine,
+)
+
+from galaxy.model import Base as gxy_base
+from galaxy.model.database_utils import (
+    create_database,
+    database_exists,
+)
+from galaxy.model.mapping import create_additional_database_objects
+from galaxy.model.tool_shed_install import Base as tsi_base
+
+ModelId = NewType("ModelId", str)
+# These identifiers are used throughout the migrations system to distinguish
+# between the two models; they refer to version directories, branch labels, etc.
+# (if you rename these, you need to rename branch labels in alembic version directories)
+GXY = ModelId("gxy")  # galaxy model identifier
+TSI = ModelId("tsi")  # tool_shed_install model identifier
+
+ALEMBIC_TABLE = "alembic_version"
+SQLALCHEMYMIGRATE_TABLE = "migrate_version"
+SQLALCHEMYMIGRATE_LAST_VERSION_GXY = 180
+SQLALCHEMYMIGRATE_LAST_VERSION_TSI = 17
+log = logging.getLogger(__name__)
+
+
+class DatabaseConfig(NamedTuple):
+    url: str
+    template: str
+    encoding: str
+
+
+class NoVersionTableError(Exception):
+    # The database has no version table (neither SQLAlchemy Migrate, nor Alembic), so it is
+    # impossible to automatically determine the state of the database. Manual update required.
+    def __init__(self, model: str) -> None:
+        super().__init__(f"Your {model} database has no version table; manual update is required")
+
+
+class IncorrectVersionError(Exception):
+    # The database has a SQLAlchemy Migrate version table, but its version is either older or more recent
+    # than {SQLALCHEMYMIGRATE_LAST_VERSION_GXY/TSI}, so it cannot be upgraded with Alembic.
+    # (A more recent version may indicate that something has changed in the database past the point
+    # where we can automatically migrate from SQLAlchemy Migrate to Alembic.)
+    # Manual update required.
+    def __init__(self, model: str, expected_version: int) -> None:
+        msg = f"Your {model} database version is incorrect; version {expected_version} is expected. "
+        msg += "Manual update is required"
+        super().__init__(msg)
+
+
+class OutdatedDatabaseError(Exception):
+    # The database is under Alembic version control, but is out-of-date. Automatic upgrade possible.
+ def __init__(self, model: str) -> None: + msg = f"Your {model} database is out-of-date; automatic update requires setting `database_auto_migrate`" + super().__init__(msg) + + +class InvalidModelIdError(Exception): + def __init__(self, model: str) -> None: + super().__init__(f"Invalid model: {model}") + + +class RevisionNotFoundError(Exception): + # The database has an Alembic version table; however, that table does not contain a revision identifier + # for the given model. As a result, it is impossible to determine the state of the database for this model + # (gxy or tsi). + def __init__(self, model: str) -> None: + msg = "The database has an alembic version table, but that table does not contain " + msg += f"a revision for the {model} model" + super().__init__(msg) + + +class AlembicManager: + """ + Alembic operations on one database. + """ + + @staticmethod + def is_at_revision(engine: Engine, revision: Union[str, Iterable[str]]) -> bool: + """ + True if revision is a subset of the set of version heads stored in the database. + """ + revision = listify(revision) + with engine.connect() as conn: + context = MigrationContext.configure(conn) + db_version_heads = context.get_current_heads() + return set(revision) <= set(db_version_heads) + + def __init__(self, engine: Engine, config_dict: Optional[dict] = None) -> None: + self.engine = engine + self.alembic_cfg = self._load_config(config_dict) + self.script_directory = ScriptDirectory.from_config(self.alembic_cfg) + self._db_heads: Optional[Iterable[str]] + self._reset_db_heads() + + def _load_config(self, config_dict: Optional[dict]) -> Config: + alembic_root = os.path.dirname(__file__) + _alembic_file = os.path.join(alembic_root, "alembic.ini") + config = Config(_alembic_file) + url = get_url_string(self.engine) + config.set_main_option("sqlalchemy.url", url) + if config_dict: + for key, value in config_dict.items(): + config.set_main_option(key, value) + return config + + def stamp_model_head(self, model: ModelId) -> None: + """Partial proxy to alembic's stamp command.""" + command.stamp(self.alembic_cfg, f"{model}@head") + self._reset_db_heads() + + def stamp_revision(self, revision: Union[str, Iterable[str]]) -> None: + """Partial proxy to alembic's stamp command.""" + command.stamp(self.alembic_cfg, revision) # type: ignore[arg-type] # https://alembic.sqlalchemy.org/en/latest/api/commands.html#alembic.command.stamp.params.revision + self._reset_db_heads() + + def upgrade(self, model: ModelId) -> None: + """Partial proxy to alembic's upgrade command.""" + # This works with or without an existing alembic version table. + command.upgrade(self.alembic_cfg, f"{model}@head") + self._reset_db_heads() + + def is_under_version_control(self, model: ModelId) -> bool: + """ + True if the database version table contains a revision that corresponds to a revision + in the script directory that has branch label `model`. + """ + if self.db_heads: + for db_head in self.db_heads: + try: + revision = self._get_revision(db_head) + if revision and model in revision.branch_labels: + log.info(f"The version of the {model} model in the database is {db_head}.") + return True + except alembic.util.exc.CommandError: # No need to raise exception. + log.info(f"Revision {db_head} does not exist in the script directory.") + return False + + def is_up_to_date(self, model: ModelId) -> bool: + """ + True if the head revision for `model` in the script directory is stored + in the database. 
+ """ + head_id = self.get_model_script_head(model) + return bool(self.db_heads and head_id in self.db_heads) + + def get_model_db_head(self, model: ModelId) -> Optional[str]: + return self._get_head_revision(model, cast(Iterable[str], self.db_heads)) + + def get_model_script_head(self, model: ModelId) -> Optional[str]: + return self._get_head_revision(model, self.script_directory.get_heads()) + + def _get_head_revision(self, model: ModelId, heads: Iterable[str]) -> Optional[str]: + for head in heads: + revision = self._get_revision(head) + if revision and model in revision.branch_labels: + return head + return None + + @property + def db_heads(self) -> Iterable: + if self._db_heads is None: # Explicitly check for None: could be an empty tuple. + with self.engine.connect() as conn: + context: MigrationContext = MigrationContext.configure(conn) + self._db_heads = context.get_current_heads() + # We get a tuple as long as we use branches. Otherwise, we'd get a single value. + # listify() is a safeguard in case we stop using branches. + self._db_heads = listify(self._db_heads) + return self._db_heads + + def _get_revision(self, revision_id: str) -> Optional[Script]: + try: + return self.script_directory.get_revision(revision_id) + except alembic.util.exc.CommandError as e: + log.error(f"Revision {revision_id} not found in the script directory") + raise e + + def _reset_db_heads(self) -> None: + self._db_heads = None + + +class DatabaseStateCache: + """ + Snapshot of database state. + """ + + def __init__(self, engine: Engine) -> None: + self._load_db(engine) + + @property + def tables(self) -> Dict[str, Table]: + return self.db_metadata.tables + + def is_database_empty(self) -> bool: + return not bool(self.db_metadata.tables) + + def has_alembic_version_table(self) -> bool: + return ALEMBIC_TABLE in self.db_metadata.tables + + def has_sqlalchemymigrate_version_table(self) -> bool: + return SQLALCHEMYMIGRATE_TABLE in self.db_metadata.tables + + def is_last_sqlalchemymigrate_version(self, last_version: int) -> bool: + return self.sqlalchemymigrate_version == last_version + + def _load_db(self, engine: Engine) -> None: + with engine.connect() as conn: + self.db_metadata = self._load_db_metadata(conn) + self.sqlalchemymigrate_version = self._load_sqlalchemymigrate_version(conn) + + def _load_db_metadata(self, conn: Connection) -> MetaData: + metadata = MetaData() + metadata.reflect(bind=conn) + return metadata + + def _load_sqlalchemymigrate_version(self, conn: Connection) -> CursorResult: + if self.has_sqlalchemymigrate_version_table(): + sql = f"select version from {SQLALCHEMYMIGRATE_TABLE}" + return conn.execute(sql).scalar() + + +def verify_databases_via_script( + gxy_config: DatabaseConfig, + tsi_config: DatabaseConfig, + is_auto_migrate: bool = False, +) -> None: + # This function serves a use case when an engine has not been created yet + # (e.g. when called from a script). 
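+    # A hypothetical invocation from a maintenance script might look like:
+    #
+    #   gxy_config = DatabaseConfig("postgresql://localhost/galaxy", None, None)
+    #   tsi_config = DatabaseConfig("postgresql://localhost/galaxy_install", None, None)
+    #   verify_databases_via_script(gxy_config, tsi_config)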
+    gxy_engine = create_engine(gxy_config.url)
+    tsi_engine = None
+    if tsi_config.url and tsi_config.url != gxy_config.url:
+        tsi_engine = create_engine(tsi_config.url)
+
+    verify_databases(
+        gxy_engine,
+        gxy_config.template,
+        gxy_config.encoding,
+        tsi_engine,
+        tsi_config.template,
+        tsi_config.encoding,
+        is_auto_migrate,
+    )
+    gxy_engine.dispose()
+    if tsi_engine:
+        tsi_engine.dispose()
+
+
+def verify_databases(
+    gxy_engine: Engine,
+    gxy_template: Optional[str],
+    gxy_encoding: Optional[str],
+    tsi_engine: Optional[Engine],
+    tsi_template: Optional[str],
+    tsi_encoding: Optional[str],
+    is_auto_migrate: bool,
+) -> None:
+    # Verify gxy model.
+    gxy_verifier = DatabaseStateVerifier(gxy_engine, GXY, gxy_template, gxy_encoding, is_auto_migrate)
+    gxy_verifier.run()
+
+    # New database = one engine or same engine, and gxy model has just been initialized.
+    is_new_database = (not tsi_engine or gxy_engine == tsi_engine) and gxy_verifier.is_new_database
+
+    # Determine engine for tsi model.
+    tsi_engine = tsi_engine or gxy_engine
+
+    # Verify tsi model.
+    tsi_verifier = DatabaseStateVerifier(tsi_engine, TSI, tsi_template, tsi_encoding, is_auto_migrate, is_new_database)
+    tsi_verifier.run()
+
+
+class DatabaseStateVerifier:
+    def __init__(
+        self,
+        engine: Engine,
+        model: ModelId,
+        database_template: Optional[str],
+        database_encoding: Optional[str],
+        is_auto_migrate: bool,
+        is_new_database: Optional[bool] = False,
+    ) -> None:
+        self.engine = engine
+        self.model = model
+        self.database_template = database_template
+        self.database_encoding = database_encoding
+        self._is_auto_migrate = is_auto_migrate
+        self.metadata = get_metadata(model)
+        # True if database has been initialized for another model.
+        self.is_new_database = is_new_database
+        # These values may or may not be required, so do a lazy load.
+        self._db_state: Optional[DatabaseStateCache] = None
+        self._alembic_manager: Optional[AlembicManager] = None
+
+    @property
+    def is_auto_migrate(self) -> bool:
+        return self._is_auto_migrate
+
+    @property
+    def db_state(self) -> DatabaseStateCache:
+        if not self._db_state:
+            self._db_state = DatabaseStateCache(engine=self.engine)
+        return self._db_state
+
+    @property
+    def alembic_manager(self) -> AlembicManager:
+        if not self._alembic_manager:
+            self._alembic_manager = get_alembic_manager(self.engine)
+        return self._alembic_manager
+
+    def run(self) -> None:
+        if self._handle_no_database():
+            return
+        if self._handle_empty_database():
+            return
+        self._handle_nonempty_database()
+
+    def _handle_no_database(self) -> bool:
+        url = get_url_string(self.engine)
+        if not database_exists(url):
+            self._create_database(url)
+            self._initialize_database()
+            return True
+        return False
+
+    def _handle_empty_database(self) -> bool:
+        if self.is_new_database or self._is_database_empty():
+            self._initialize_database()
+            return True
+        return False
+
+    def _handle_nonempty_database(self) -> None:
+        if self._has_alembic_version_table():
+            self._handle_with_alembic()
+        elif self._has_sqlalchemymigrate_version_table():
+            if self._is_last_sqlalchemymigrate_version():
+                self._try_to_upgrade()
+            else:
+                self._handle_wrong_sqlalchemymigrate_version()
+        else:
+            self._handle_no_version_table()
+
+    def _handle_with_alembic(self) -> None:
+        am = self.alembic_manager
+        model = self._get_model_name()
+
+        if am.is_up_to_date(self.model):
+            log.info(f"Your {model} database is up-to-date")
+            return
+        if am.is_under_version_control(self.model):
+            # Model is under version control, but outdated. Try to upgrade.
+            self._try_to_upgrade()
+        else:
+            # Model is not under version control. We fail for the gxy model because we can't guess
+            # what the state of the database is if there is an alembic table without a gxy revision.
+            # For the tsi model, we can guess. If there are no tsi tables in the database, we treat it
+            # as a new install; but if there is at least one table, we assume it is the same version as gxy.
+            # See more details in this PR description: https://github.com/galaxyproject/galaxy/pull/13108
+            if self.model == TSI:
+                if self._no_model_tables_exist():
+                    self._initialize_database()
+                else:
+                    self._try_to_upgrade()
+            else:
+                raise RevisionNotFoundError(model)
+
+    def _try_to_upgrade(self):
+        am = self.alembic_manager
+        model = self._get_model_name()
+        code_version = am.get_model_script_head(self.model)
+        if not self.is_auto_migrate:
+            db_version = am.get_model_db_head(self.model)
+            msg = self._get_upgrade_message(model, cast(str, db_version), cast(str, code_version))
+            log.warning(msg)
+            raise OutdatedDatabaseError(model)
+        else:
+            log.info(f"Database is being upgraded to current version: {code_version}")
+            am.upgrade(self.model)
+            return
+
+    def _get_upgrade_message(self, model: str, db_version: str, code_version: str) -> str:
+        msg = f"Your {model} database has version {db_version}, but this code expects "
+        msg += f"version {code_version}. "
+        msg += "This database can be upgraded automatically if database_auto_migrate is set. "
+        msg += "To upgrade manually, run `run_alembic.sh` (see instructions in that file). "
+        msg += "Please remember to back up your database before migrating."
+        return msg
+
+    def _get_model_name(self) -> str:
+        return "galaxy" if self.model == GXY else "tool shed install"
+
+    def _no_model_tables_exist(self) -> bool:
+        # True if there are no tables from `self.model` in the database.
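+        # For example (hypothetical state): if `self.model` is TSI and the database
+        # contains only gxy tables plus the alembic_version table, this returns True.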
+ db_tables = self.db_state.tables + for tablename in set(self.metadata.tables) - {ALEMBIC_TABLE}: + if tablename in db_tables: + return False + return True + + def _create_database(self, url: str) -> None: + create_kwds = {} + message = f"Creating database for URI [{url}]" + if self.database_template: + message += f" from template [{self.database_template}]" + create_kwds["template"] = self.database_template + if self.database_encoding: + message += f" with encoding [{self.database_encoding}]" + create_kwds["encoding"] = self.database_encoding + log.info(message) + create_database(url, **create_kwds) + + def _initialize_database(self) -> None: + load_metadata(self.metadata, self.engine) + if self.model == GXY: + self._create_additional_database_objects() + self.alembic_manager.stamp_model_head(self.model) + self.is_new_database = True + + def _create_additional_database_objects(self) -> None: + create_additional_database_objects(self.engine) + + def _is_database_empty(self) -> bool: + return self.db_state.is_database_empty() + + def _has_alembic_version_table(self) -> bool: + return self.db_state.has_alembic_version_table() + + def _has_sqlalchemymigrate_version_table(self) -> bool: + return self.db_state.has_sqlalchemymigrate_version_table() + + def _is_last_sqlalchemymigrate_version(self) -> bool: + last_version = get_last_sqlalchemymigrate_version(self.model) + return self.db_state.is_last_sqlalchemymigrate_version(last_version) + + def _handle_no_version_table(self) -> NoReturn: + model = self._get_model_name() + raise NoVersionTableError(model) + + def _handle_wrong_sqlalchemymigrate_version(self) -> NoReturn: + if self.model == GXY: + expected_version = SQLALCHEMYMIGRATE_LAST_VERSION_GXY + else: + expected_version = SQLALCHEMYMIGRATE_LAST_VERSION_TSI + model = self._get_model_name() + raise IncorrectVersionError(model, expected_version) + + +def get_last_sqlalchemymigrate_version(model: ModelId) -> int: + if model == GXY: + return SQLALCHEMYMIGRATE_LAST_VERSION_GXY + elif model == TSI: + return SQLALCHEMYMIGRATE_LAST_VERSION_TSI + else: + raise InvalidModelIdError(model) + + +def get_url_string(engine: Engine) -> str: + return engine.url.render_as_string(hide_password=False) + + +def get_alembic_manager(engine: Engine) -> AlembicManager: + return AlembicManager(engine) + + +def get_metadata(model: ModelId) -> MetaData: + if model == GXY: + return get_gxy_metadata() + elif model == TSI: + return get_tsi_metadata() + else: + raise InvalidModelIdError(model) + + +def load_metadata(metadata: MetaData, engine: Engine) -> None: + with engine.connect() as conn: + metadata.create_all(bind=conn) + + +def listify(data: Union[str, Iterable[str]]) -> Iterable[str]: + if not isinstance(data, (list, tuple)): + return [cast(str, data)] + return data + + +def get_gxy_metadata() -> MetaData: + return gxy_base.metadata + + +def get_tsi_metadata() -> MetaData: + return tsi_base.metadata diff --git a/lib/galaxy/model/migrations/alembic.ini b/lib/galaxy/model/migrations/alembic.ini new file mode 100644 index 000000000000..7e163261d5df --- /dev/null +++ b/lib/galaxy/model/migrations/alembic.ini @@ -0,0 +1,42 @@ +[alembic] +script_location = %(here)s/alembic +prepend_sys_path = . 
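+# Both branch version directories are listed below; the entries are separated
+# by ';' (declared as version_path_separator).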
+version_locations = %(here)s/alembic/versions_gxy;%(here)s/alembic/versions_tsi +version_path_separator = ; + +[post_write_hooks] + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/lib/galaxy/model/migrations/alembic/env.py b/lib/galaxy/model/migrations/alembic/env.py new file mode 100644 index 000000000000..a1ac4b29fb4b --- /dev/null +++ b/lib/galaxy/model/migrations/alembic/env.py @@ -0,0 +1,129 @@ +import re +from typing import ( + Callable, + cast, + Dict, +) + +from alembic import context +from alembic.script import ScriptDirectory +from alembic.script.base import Script +from sqlalchemy import create_engine + +from galaxy.model.migrations import ( + GXY, + TSI, +) + +config = context.config +target_metadata = None # Not implemented: used for autogenerate, which we don't use here. + + +def run_migrations_offline() -> None: + """Run migrations in offline mode; database url required.""" + if not config.cmd_opts: # invoked programmatically + url = _get_url_from_config() + _configure_and_run_migrations_offline(url) + else: # invoked via script + f = _configure_and_run_migrations_offline + _run_migrations_invoked_via_script(f) + + +def run_migrations_online() -> None: + """Run migrations in online mode: engine and connection required.""" + if not config.cmd_opts: # invoked programmatically + url = _get_url_from_config() + _configure_and_run_migrations_online(url) + else: # invoked via script + f = _configure_and_run_migrations_online + _run_migrations_invoked_via_script(f) + + +def _run_migrations_invoked_via_script(run_migrations: Callable[[str], None]) -> None: + urls = _load_urls() + + # Special case: the `current` command has no config.cmd_opts.revision property, + # so we check for it before checking for `upgrade/downgrade`. 
+    if _process_cmd_current(urls):
+        return  # we're done
+
+    revision_str = config.cmd_opts.revision  # type: ignore[union-attr]
+
+    if revision_str.startswith(f"{GXY}@"):
+        url = urls[GXY]
+    elif revision_str.startswith(f"{TSI}@"):
+        url = urls[TSI]
+    else:
+        revision = _get_revision(revision_str)
+        if GXY in revision.branch_labels:
+            url = urls[GXY]
+        elif TSI in revision.branch_labels:
+            url = urls[TSI]
+
+    run_migrations(url)
+
+
+def _process_cmd_current(urls: Dict[str, str]) -> bool:
+    if config.cmd_opts.cmd[0].__name__ == "current":  # type: ignore[union-attr]
+        for url in urls.values():
+            _configure_and_run_migrations_online(url)
+        return True
+    return False
+
+
+def _get_revision(revision_str: str) -> Script:
+    revision_id = _get_revision_id(revision_str)
+    script_directory = ScriptDirectory.from_config(config)
+    revision = script_directory.get_revision(revision_id)
+    if not revision:
+        raise Exception(f'Revision not found: "{revision_str}"')
+    return revision
+
+
+def _get_revision_id(revision_str: str) -> str:
+    # Match a full or partial revision (GUID) or a relative migration identifier
+    p = re.compile(r"([0-9A-Fa-f]+)([+-]\d)?")
+    m = p.match(revision_str)
+    if not m:
+        raise Exception(f'Invalid revision or migration identifier: "{revision_str}"')
+    return m.group(1)
+
+
+def _configure_and_run_migrations_offline(url: str) -> None:
+    context.configure(
+        url=url,
+        target_metadata=target_metadata,
+        literal_binds=True,
+        dialect_opts={"paramstyle": "named"},
+    )
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def _configure_and_run_migrations_online(url: str) -> None:
+    engine = create_engine(url)
+    with engine.connect() as connection:
+        context.configure(connection=connection, target_metadata=target_metadata)
+        with context.begin_transaction():
+            context.run_migrations()
+    engine.dispose()
+
+
+def _get_url_from_config() -> str:
+    url = config.get_main_option("sqlalchemy.url")
+    return cast(str, url)
+
+
+def _load_urls() -> Dict[str, str]:
+    gxy_url = context.get_x_argument(as_dictionary=True).get(f"{GXY}_url")
+    tsi_url = context.get_x_argument(as_dictionary=True).get(f"{TSI}_url")
+    return {
+        GXY: gxy_url,
+        TSI: tsi_url,
+    }
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/lib/galaxy/model/migrations/alembic/script.py.mako b/lib/galaxy/model/migrations/alembic/script.py.mako
new file mode 100644
index 000000000000..2c0156303a8d
--- /dev/null
+++ b/lib/galaxy/model/migrations/alembic/script.py.mako
@@ -0,0 +1,24 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)} +down_revision = ${repr(down_revision)} +branch_labels = ${repr(branch_labels)} +depends_on = ${repr(depends_on)} + + +def upgrade(): + ${upgrades if upgrades else "pass"} + + +def downgrade(): + ${downgrades if downgrades else "pass"} diff --git a/lib/galaxy/model/migrations/alembic/versions_gxy/e7b6dcb09efd_create_gxy_branch.py b/lib/galaxy/model/migrations/alembic/versions_gxy/e7b6dcb09efd_create_gxy_branch.py new file mode 100644 index 000000000000..e76c4d87e30d --- /dev/null +++ b/lib/galaxy/model/migrations/alembic/versions_gxy/e7b6dcb09efd_create_gxy_branch.py @@ -0,0 +1,22 @@ +"""create gxy branch + +Revision ID: e7b6dcb09efd +Revises: +Create Date: 2021-11-05 16:32:43.243049 + +""" + + +# revision identifiers, used by Alembic. +revision = "e7b6dcb09efd" +down_revision = None +branch_labels = ("gxy",) +depends_on = None + + +def upgrade(): + pass + + +def downgrade(): + pass diff --git a/lib/galaxy/model/migrations/alembic/versions_tsi/d4a650f47a3c_create_tsi_branch.py b/lib/galaxy/model/migrations/alembic/versions_tsi/d4a650f47a3c_create_tsi_branch.py new file mode 100644 index 000000000000..87510a7e58a1 --- /dev/null +++ b/lib/galaxy/model/migrations/alembic/versions_tsi/d4a650f47a3c_create_tsi_branch.py @@ -0,0 +1,22 @@ +"""create tsi branch + +Revision ID: d4a650f47a3c +Revises: +Create Date: 2021-11-05 16:32:25.113750 + +""" + + +# revision identifiers, used by Alembic. +revision = "d4a650f47a3c" +down_revision = None +branch_labels = ("tsi",) +depends_on = None + + +def upgrade(): + pass + + +def downgrade(): + pass diff --git a/lib/galaxy/model/migrations/scripts.py b/lib/galaxy/model/migrations/scripts.py new file mode 100644 index 000000000000..429a9751726d --- /dev/null +++ b/lib/galaxy/model/migrations/scripts.py @@ -0,0 +1,307 @@ +import os +import re +import sys +from typing import ( + List, + Optional, + Tuple, +) + +import alembic.config +from alembic.config import Config +from alembic.runtime.migration import MigrationContext +from alembic.script import ScriptDirectory +from sqlalchemy import create_engine +from sqlalchemy.engine import Engine + +from galaxy.model.database_utils import is_one_database +from galaxy.model.migrations import ( + AlembicManager, + DatabaseConfig, + DatabaseStateCache, + GXY, + IncorrectVersionError, + SQLALCHEMYMIGRATE_LAST_VERSION_GXY, + TSI, +) +from galaxy.util.properties import ( + find_config_file, + get_data_dir, + load_app_properties, +) + +DEFAULT_CONFIG_NAMES = ["galaxy", "universe_wsgi"] +CONFIG_FILE_ARG = "--galaxy-config" +CONFIG_DIR_NAME = "config" +GXY_CONFIG_PREFIX = "GALAXY_CONFIG_" +TSI_CONFIG_PREFIX = "GALAXY_INSTALL_CONFIG_" + + +def get_configuration(argv: List[str], cwd: str) -> Tuple[DatabaseConfig, DatabaseConfig, bool]: + """ + Return a 3-item-tuple with configuration values used for managing databases. 
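+
+    A hypothetical return value, assuming one combined database and no
+    auto-migrate (each DatabaseConfig holds url, template, encoding):
+
+        (DatabaseConfig(url="postgresql://localhost/galaxy", template=None, encoding=None),
+         DatabaseConfig(url="postgresql://localhost/galaxy", template=None, encoding=None),
+         False)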
+    """
+    config_file = _pop_config_file(argv)
+    if config_file is None:
+        cwds = [cwd, os.path.join(cwd, CONFIG_DIR_NAME)]
+        config_file = find_config_file(DEFAULT_CONFIG_NAMES, dirs=cwds)
+
+    # load gxy properties and auto-migrate
+    properties = load_app_properties(config_file=config_file, config_prefix=GXY_CONFIG_PREFIX)
+    default_url = f"sqlite:///{os.path.join(get_data_dir(properties), 'universe.sqlite')}?isolation_level=IMMEDIATE"
+    url = properties.get("database_connection", default_url)
+    template = properties.get("database_template", None)
+    encoding = properties.get("database_encoding", None)
+    is_auto_migrate = properties.get("database_auto_migrate", False)
+    gxy_config = DatabaseConfig(url, template, encoding)
+
+    # load tsi properties
+    properties = load_app_properties(config_file=config_file, config_prefix=TSI_CONFIG_PREFIX)
+    default_url = gxy_config.url
+    url = properties.get("install_database_connection", default_url)
+    template = properties.get("database_template", None)
+    encoding = properties.get("database_encoding", None)
+    tsi_config = DatabaseConfig(url, template, encoding)
+
+    return (gxy_config, tsi_config, is_auto_migrate)
+
+
+def _pop_config_file(argv: List[str]) -> Optional[str]:
+    if CONFIG_FILE_ARG in argv:
+        pos = argv.index(CONFIG_FILE_ARG)
+        argv.pop(pos)  # pop argument name
+        return argv.pop(pos)  # pop and return argument value
+    return None
+
+
+def add_db_urls_to_command_arguments(argv: List[str], gxy_url: str, tsi_url: str) -> None:
+    _insert_x_argument(argv, "tsi_url", tsi_url)
+    _insert_x_argument(argv, "gxy_url", gxy_url)
+
+
+def _insert_x_argument(argv, key: str, value: str) -> None:
+    # `_insert_x_argument('mykey', 'myval')` transforms `foo -a 1` into `foo -x mykey=myval -a 1`
+    argv.insert(1, f"{key}={value}")
+    argv.insert(1, "-x")
+
+
+def invoke_alembic() -> None:
+    """
+    Invoke the Alembic command line runner.
+
+    Accept 'heads' as the target revision argument to enable upgrading both gxy and tsi in one command.
+    This is consistent with Alembic's CLI, which allows `upgrade heads`. However, this would not work for
+    separate gxy and tsi databases: we can't attach a database url to a revision after Alembic has been
+    invoked with the 'upgrade' command and the 'heads' argument. So, instead we invoke Alembic for each head.
+    """
+    if "heads" in sys.argv and "upgrade" in sys.argv:
+        i = sys.argv.index("heads")
+        sys.argv[i] = f"{GXY}@head"
+        alembic.config.main()
+        sys.argv[i] = f"{TSI}@head"
+        alembic.config.main()
+    else:
+        alembic.config.main()
+
+
+class LegacyScriptsException(Exception):
+    # Misc. errors caused by incorrect arguments passed to a legacy script.
+    def __init__(self, message: str) -> None:
+        super().__init__(message)
+
+
+class LegacyScripts:
+
+    LEGACY_CONFIG_FILE_ARG_NAMES = ["-c", "--config", "--config-file"]
+    ALEMBIC_CONFIG_FILE_ARG = "--alembic-config"  # alembic config file, set in the calling script
+    DEFAULT_DB_ARG = "default"
+
+    def __init__(self, argv: List[str], cwd: Optional[str] = None) -> None:
+        self.argv = argv
+        self.cwd = cwd or os.getcwd()
+        self.database = self.DEFAULT_DB_ARG
+
+    def run(self) -> None:
+        """
+        Convert legacy arguments to current spec required by Alembic,
+        then add db url arguments required by Alembic.
+        """
+        self.convert_args()
+        add_db_urls_to_command_arguments(self.argv, self.gxy_url, self.tsi_url)
+
+    def convert_args(self) -> None:
+        """
+        Convert legacy arguments to current spec required by Alembic.
+
+        Note: The following method calls must be done in this sequence.
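+
+        For example (a hypothetical legacy invocation against one combined database),
+        an argument list such as
+
+            ['manage_db.sh', '--alembic-config', 'alembic.ini', '-c', 'galaxy.yml', 'upgrade', 'galaxy']
+
+        ends up roughly as
+
+            ['manage_db.sh', '-c', 'alembic.ini', 'upgrade', 'heads']
+
+        (the galaxy config argument is consumed while loading the database urls).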
+        """
+        self.pop_database_argument()
+        self.rename_config_argument()
+        self.rename_alembic_config_argument()
+        self.load_db_urls()
+        self.convert_version_argument()
+
+    def pop_database_argument(self) -> None:
+        """
+        If last argument is a valid database name, pop and assign it; otherwise assign default.
+        """
+        arg = self.argv[-1]
+        if arg in ["galaxy", "install"]:
+            self.database = self.argv.pop()
+
+    def rename_config_argument(self) -> None:
+        """
+        Rename the optional config argument: we can't use '-c' because that option is used by Alembic.
+        """
+        for arg in self.LEGACY_CONFIG_FILE_ARG_NAMES:
+            if arg in self.argv:
+                self._rename_arg(arg, CONFIG_FILE_ARG)
+                return
+
+    def rename_alembic_config_argument(self) -> None:
+        """
+        Rename argument name: `--alembic-config` to `-c`. There should be no `-c` argument present.
+        """
+        if "-c" in self.argv:
+            raise LegacyScriptsException("Cannot rename alembic config argument: `-c` argument present.")
+        self._rename_arg(self.ALEMBIC_CONFIG_FILE_ARG, "-c")
+
+    def convert_version_argument(self) -> None:
+        """
+        Convert legacy version argument to current spec required by Alembic.
+        """
+        if "--version" in self.argv:
+            # Just remove it: the following argument should be the version/revision identifier.
+            pos = self.argv.index("--version")
+            self.argv.pop(pos)
+        else:
+            # If we find --version=foo, extract foo and replace arg with foo (which is the revision identifier)
+            p = re.compile(r"--version=([0-9A-Fa-f]+)")
+            for i, arg in enumerate(self.argv):
+                m = p.match(arg)
+                if m:
+                    self.argv[i] = m.group(1)
+                    return
+            # No version argument found: construct argument for an upgrade operation.
+            # Raise exception otherwise.
+            if "upgrade" not in self.argv:
+                raise LegacyScriptsException("If no `--version` argument supplied, `upgrade` argument is required")
+
+            if self._is_one_database():  # upgrade both regardless of database argument
+                self.argv.append("heads")
+            else:  # for separate databases, choose one
+                if self.database in ["galaxy", self.DEFAULT_DB_ARG]:
+                    self.argv.append("gxy@head")
+                elif self.database == "install":
+                    self.argv.append("tsi@head")
+
+    def _rename_arg(self, old_name, new_name) -> None:
+        pos = self.argv.index(old_name)
+        self.argv[pos] = new_name
+
+    def load_db_urls(self) -> None:
+        gxy_config, tsi_config, _ = get_configuration(self.argv, self.cwd)
+        self.gxy_url = gxy_config.url
+        self.tsi_url = tsi_config.url
+
+    def _is_one_database(self):
+        return is_one_database(self.gxy_url, self.tsi_url)
+
+
+class LegacyManageDb:
+    def __init__(self):
+        self._set_db_urls()
+
+    def get_gxy_version(self):
+        """
+        Get the head revision for the gxy branch from the Alembic script directory.
+        (previously referred to as "max/repository version")
+        """
+        script_directory = self._get_script_directory()
+        heads = script_directory.get_heads()
+        for head in heads:
+            revision = script_directory.get_revision(head)
+            if revision and GXY in revision.branch_labels:
+                return head
+        return None
+
+    def get_gxy_db_version(self, gxy_db_url=None):
+        """
+        Get the head revision for the gxy branch from the galaxy database. If there
+        is no alembic_version table, get the sqlalchemy migrate version. Raise error
+        if that version is not the latest.
+        (previously referred to as "database version")
+        """
+        db_url = gxy_db_url or self.gxy_db_url
+        # Create the engine before entering the try block so that `engine` is always bound in `finally`.
+        engine = create_engine(db_url)
+        try:
+            version = self._get_gxy_alembic_db_version(engine)
+            if not version:
+                version = self._get_gxy_sam_db_version(engine)
+                if version != SQLALCHEMYMIGRATE_LAST_VERSION_GXY:
+                    raise IncorrectVersionError(GXY, SQLALCHEMYMIGRATE_LAST_VERSION_GXY)
+            return version
+        finally:
+            engine.dispose()
+
+    def run_upgrade(self, gxy_db_url=None, tsi_db_url=None):
+        """
+        Alembic will upgrade both branches, gxy and tsi, to their head revisions.
+        """
+        gxy_db_url = gxy_db_url or self.gxy_db_url
+        tsi_db_url = tsi_db_url or self.tsi_db_url
+        self._upgrade(gxy_db_url, GXY)
+        self._upgrade(tsi_db_url, TSI)
+
+    def _upgrade(self, db_url, model):
+        engine = create_engine(db_url)
+        try:
+            am = get_alembic_manager(engine)
+            am.upgrade(model)
+        finally:
+            engine.dispose()
+
+    def _set_db_urls(self):
+        ls = LegacyScripts(sys.argv, os.getcwd())
+        ls.rename_config_argument()
+        ls.load_db_urls()
+        self.gxy_db_url = ls.gxy_url
+        self.tsi_db_url = ls.tsi_url
+
+    def _get_gxy_sam_db_version(self, engine):
+        dbcache = DatabaseStateCache(engine)
+        return dbcache.sqlalchemymigrate_version
+
+    def _get_script_directory(self):
+        alembic_cfg = self._get_alembic_cfg()
+        return ScriptDirectory.from_config(alembic_cfg)
+
+    def _get_alembic_cfg(self):
+        config_file = os.path.join(os.path.dirname(__file__), "alembic.ini")
+        config_file = os.path.abspath(config_file)
+        return Config(config_file)
+
+    def _get_gxy_alembic_db_version(self, engine):
+        # We may get 2 values, one for each branch (gxy and tsi), so we need to
+        # determine which one is the gxy head.
+        with engine.connect() as conn:
+            context = MigrationContext.configure(conn)
+            db_heads = context.get_current_heads()
+            if db_heads:
+                gxy_revisions = self._get_all_gxy_revisions()
+                for db_head in db_heads:
+                    if db_head in gxy_revisions:
+                        return db_head
+        return None
+
+    def _get_all_gxy_revisions(self):
+        gxy_revisions = set()
+        script_directory = self._get_script_directory()
+        for rev in script_directory.walk_revisions():
+            if GXY in rev.branch_labels:
+                gxy_revisions.add(rev.revision)
+        return gxy_revisions
+
+
+def get_alembic_manager(engine: Engine) -> AlembicManager:
+    return AlembicManager(engine)
diff --git a/lib/galaxy/model/orm/engine_factory.py b/lib/galaxy/model/orm/engine_factory.py
index d8def0560bd0..0c67546ce354 100644
--- a/lib/galaxy/model/orm/engine_factory.py
+++ b/lib/galaxy/model/orm/engine_factory.py
@@ -48,7 +48,7 @@ def pretty_stack():
 
 def build_engine(
     url,
-    engine_options,
+    engine_options=None,
     database_query_profiling_proxy=False,
     trace_logger=None,
     slow_query_log_threshold=0,
@@ -104,6 +104,7 @@ def after_cursor_execute(conn, cursor, statement, parameters, context, executema
     if "sqlite://" in url:
         connect_args["check_same_thread"] = False
     # Create the database engine
+    engine_options = engine_options or {}
    engine = create_engine(url, connect_args=connect_args, **engine_options)
     register_after_fork(engine, lambda e: e.dispose())
     return engine
diff --git a/lib/galaxy/model/orm/scripts.py b/lib/galaxy/model/orm/scripts.py
index f62f4fb0e73a..42e3d612b890 100644
--- a/lib/galaxy/model/orm/scripts.py
+++ b/lib/galaxy/model/orm/scripts.py
@@ -1,13 +1,18 @@
 """
-Code to support database helper scripts (create_db.py, manage_db.py, etc...).
+Code to support database helper scripts (create_toolshed_db.py, migrate_toolshed_db.py, etc...).
""" import argparse import logging import os import sys -from migrate.versioning.shell import main as migrate_main +import alembic.config +from galaxy.model.migrations import ( + GXY, + TSI, +) +from galaxy.model.migrations.scripts import get_configuration from galaxy.util.path import get_ext from galaxy.util.properties import ( find_config_file, @@ -25,12 +30,6 @@ DATABASE = { "galaxy": { - "repo": "galaxy/model/migrate", - "default_sqlite_file": "universe.sqlite", - "config_override": "GALAXY_CONFIG_", - }, - "tools": { - "repo": "galaxy/model/tool_shed_install/migrate", "default_sqlite_file": "universe.sqlite", "config_override": "GALAXY_CONFIG_", }, @@ -42,7 +41,6 @@ "config_section": "tool_shed", }, "install": { - "repo": "galaxy/model/tool_shed_install/migrate", "config_prefix": "install_", "default_sqlite_file": "install.sqlite", "config_override": "GALAXY_INSTALL_CONFIG_", @@ -109,8 +107,6 @@ def get_config(argv, use_argparse=True, cwd=None): >>> uri_with_env = os.getenv("GALAXY_TEST_DBURI", "sqlite:////moo/universe.sqlite?isolation_level=IMMEDIATE") >>> config['db_url'] == uri_with_env True - >>> config['repo'].endswith('galaxy/model/migrate') - True >>> rmtree(config_dir) """ config_file, config_section, database = _read_model_arguments(argv, use_argparse=use_argparse) @@ -123,7 +119,10 @@ def get_config(argv, use_argparse=True, cwd=None): cwd = [DEFAULT_CONFIG_DIR] config_file = find_config_file(config_names, dirs=cwd) - repo = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, database_defaults["repo"]) + repo = database_defaults.get("repo") + if repo: + repo = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, repo) + config_prefix = database_defaults.get("config_prefix", DEFAULT_CONFIG_PREFIX) config_override = database_defaults.get("config_override", "GALAXY_CONFIG_") default_sqlite_file = database_defaults["default_sqlite_file"] @@ -153,6 +152,21 @@ def get_config(argv, use_argparse=True, cwd=None): def manage_db(): - # Migrate has its own args, so cannot use argparse - config = get_config(sys.argv, use_argparse=False, cwd=os.getcwd()) - migrate_main(repository=config["repo"], url=config["db_url"]) + # This is a duplicate implementation of scripts/migrate_db.py. + # See run_alembic.sh for usage. 
+ def _insert_x_argument(key, value): + sys.argv.insert(1, f"{key}={value}") + sys.argv.insert(1, "-x") + + gxy_config, tsi_config, _ = get_configuration(sys.argv, os.getcwd()) + _insert_x_argument("tsi_url", tsi_config.url) + _insert_x_argument("gxy_url", gxy_config.url) + + if "heads" in sys.argv and "upgrade" in sys.argv: + i = sys.argv.index("heads") + sys.argv[i] = f"{GXY}@head" + alembic.config.main() + sys.argv[i] = f"{TSI}@head" + alembic.config.main() + else: + alembic.config.main() diff --git a/lib/galaxy/model/tool_shed_install/mapping.py b/lib/galaxy/model/tool_shed_install/mapping.py index ec5586d9da9c..acef2ca8c77d 100644 --- a/lib/galaxy/model/tool_shed_install/mapping.py +++ b/lib/galaxy/model/tool_shed_install/mapping.py @@ -7,15 +7,16 @@ def init(url, engine_options=None, create_tables=False): - """Connect mappings to the database""" - # Load the appropriate db module - engine_options = engine_options or {} engine = build_engine(url, engine_options) - result = ModelMapping([install_model], engine=engine) - # Create tables if needed if create_tables: - metadata.create_all(bind=engine) - # metadata.engine.commit() - result.create_tables = create_tables - # load local galaxy security policy - return result + create_database_objects(engine) + return configure_model_mapping(engine) + + +def create_database_objects(engine): + mapper_registry.metadata.create_all(bind=engine) + + +def configure_model_mapping(engine): + # TODO: do we need to load local galaxy security policy? + return ModelMapping([install_model], engine=engine) diff --git a/lib/galaxy/model/tool_shed_install/migrate/__init__.py b/lib/galaxy/model/tool_shed_install/migrate/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/lib/galaxy/model/tool_shed_install/migrate/check.py b/lib/galaxy/model/tool_shed_install/migrate/check.py deleted file mode 100644 index 199b31b8bdf9..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/check.py +++ /dev/null @@ -1,120 +0,0 @@ -import logging -import os.path -import sys - -from migrate.versioning import ( - repository, - schema, -) -from sqlalchemy import ( - create_engine, - MetaData, - Table, -) -from sqlalchemy.exc import NoSuchTableError - -from galaxy.model.database_utils import ( - create_database, - database_exists, -) -from galaxy.model.tool_shed_install import mapping - -log = logging.getLogger(__name__) - -# path relative to galaxy -migrate_repository_directory = os.path.abspath(os.path.dirname(__file__)).replace(os.getcwd() + os.path.sep, "", 1) -migrate_repository = repository.Repository(migrate_repository_directory) - - -def create_or_verify_database(url, engine_options=None, app=None): - """ """ - # Create engine and metadata - engine_options = engine_options or {} - if not database_exists(url): - message = f"Creating database for URI [{url}]" - log.info(message) - create_database(url) - - engine = create_engine(url, **engine_options) - - def migrate(): - try: - # Declare the database to be under a repository's version control - db_schema = schema.ControlledSchema.create(engine, migrate_repository) - except Exception: - # The database is already under version control - db_schema = schema.ControlledSchema(engine, migrate_repository) - # Apply all scripts to get to current version - migrate_to_current_version(engine, db_schema) - - meta = MetaData(bind=engine) - if app and getattr(app.config, "database_auto_migrate", False): - migrate() - return - - # Try to load tool_shed_repository table - try: - 
Table("tool_shed_repository", meta, autoload=True) - except NoSuchTableError: - # No table means a completely uninitialized database. If we - # have an app, we'll set its new_installation setting to True - # so the tool migration process will be skipped. - log.info("Creating install database from scratch, skipping migrations") - mapping.init(url=url, create_tables=True) - current_version = migrate_repository.version().version - schema.ControlledSchema.create(engine, migrate_repository, version=current_version) - db_schema = schema.ControlledSchema(engine, migrate_repository) - assert db_schema.version == current_version - migrate() - return - - try: - Table("migrate_version", meta, autoload=True) - except NoSuchTableError: - # The database exists but is not yet under migrate version control, so init with version 1 - log.info("Adding version control to existing database") - try: - Table("metadata_file", meta, autoload=True) - schema.ControlledSchema.create(engine, migrate_repository, version=2) - except NoSuchTableError: - schema.ControlledSchema.create(engine, migrate_repository, version=1) - - # Verify that the code and the DB are in sync - db_schema = schema.ControlledSchema(engine, migrate_repository) - if migrate_repository.versions.latest != db_schema.version: - exception_msg = "Your database has version '%d' but this code expects version '%d'. " % ( - db_schema.version, - migrate_repository.versions.latest, - ) - exception_msg += "Back up your database and then migrate the schema by running the following from your Galaxy installation directory:" - exception_msg += "\n\nsh manage_db.sh upgrade install\n" - - else: - log.info("At database version %d" % db_schema.version) - - -def migrate_to_current_version(engine, schema): - # Changes to get to current version - changeset = schema.changeset(None) - for ver, change in changeset: - nextver = ver + changeset.step - log.info(f"Migrating {ver} -> {nextver}... ") - old_stdout = sys.stdout - - class FakeStdout: - def __init__(self): - self.buffer = [] - - def write(self, s): - self.buffer.append(s) - - def flush(self): - pass - - sys.stdout = FakeStdout() - try: - schema.runchange(ver, change, changeset.step) - finally: - for message in "".join(sys.stdout.buffer).split("\n"): - log.info(message) - sys.stdout = old_stdout diff --git a/lib/galaxy/model/tool_shed_install/migrate/migrate.cfg b/lib/galaxy/model/tool_shed_install/migrate/migrate.cfg deleted file mode 100644 index acfc9879b879..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/migrate.cfg +++ /dev/null @@ -1,20 +0,0 @@ -[db_settings] -# Used to identify which repository this database is versioned under. -# You can use the name of your project. -repository_id=ToolShedInstall - -# The name of the database table used to track the schema version. -# This name shouldn't already be used by your project. -# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. -version_table=migrate_version - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. 
-# This must be a list; example: ['postgres','sqlite'] -required_dbs=[] diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0001_add_tool_shed_repository_table.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0001_add_tool_shed_repository_table.py deleted file mode 120000 index e0b183100f27..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0001_add_tool_shed_repository_table.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0082_add_tool_shed_repository_table.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0002_add_tool_shed_repository_table_columns.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0002_add_tool_shed_repository_table_columns.py deleted file mode 120000 index 7cacf92509ec..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0002_add_tool_shed_repository_table_columns.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0086_add_tool_shed_repository_table_columns.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0003_tool_id_guid_map_table.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0003_tool_id_guid_map_table.py deleted file mode 120000 index 3e9dea9c20a6..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0003_tool_id_guid_map_table.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0087_tool_id_guid_map_table.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0004_add_installed_changeset_revison_column.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0004_add_installed_changeset_revison_column.py deleted file mode 120000 index 4e57eb188b8e..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0004_add_installed_changeset_revison_column.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0088_add_installed_changeset_revison_column.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0005_add_tool_shed_repository_table_columns.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0005_add_tool_shed_repository_table_columns.py deleted file mode 120000 index 6359c3b9bbb2..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0005_add_tool_shed_repository_table_columns.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0090_add_tool_shed_repository_table_columns.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0006_add_tool_version_tables.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0006_add_tool_version_tables.py deleted file mode 120000 index 9a4526d9b960..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0006_add_tool_version_tables.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0091_add_tool_version_tables.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0007_add_migrate_tools_table.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0007_add_migrate_tools_table.py deleted file mode 120000 index 9fbb816a5cfa..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0007_add_migrate_tools_table.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0092_add_migrate_tools_table.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0008_add_ctx_rev_column.py 
b/lib/galaxy/model/tool_shed_install/migrate/versions/0008_add_ctx_rev_column.py deleted file mode 120000 index a1268f1ff034..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0008_add_ctx_rev_column.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0097_add_ctx_rev_column.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0009_add_tool_dependency_table.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0009_add_tool_dependency_table.py deleted file mode 120000 index a119ddd378f3..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0009_add_tool_dependency_table.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0099_add_tool_dependency_table.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0010_alter_tool_dependency_table_version_column.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0010_alter_tool_dependency_table_version_column.py deleted file mode 120000 index 1a39c5872a28..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0010_alter_tool_dependency_table_version_column.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0100_alter_tool_dependency_table_version_column.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0011_drop_installed_changeset_revision_column.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0011_drop_installed_changeset_revision_column.py deleted file mode 120000 index eed3a389c274..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0011_drop_installed_changeset_revision_column.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0101_drop_installed_changeset_revision_column.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0012_add_tool_dependency_status_columns.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0012_add_tool_dependency_status_columns.py deleted file mode 120000 index c5759e0e88d9..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0012_add_tool_dependency_status_columns.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0102_add_tool_dependency_status_columns.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0013_add_tool_shed_repository_status_columns.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0013_add_tool_shed_repository_status_columns.py deleted file mode 120000 index c17fdea94214..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0013_add_tool_shed_repository_status_columns.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0103_add_tool_shed_repository_status_columns.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0014_add_repository_dependency_tables.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0014_add_repository_dependency_tables.py deleted file mode 120000 index 5f07fdf3a634..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0014_add_repository_dependency_tables.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0109_add_repository_dependency_tables.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0015_update_migrate_tools_table.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0015_update_migrate_tools_table.py deleted file mode 120000 index 55c1ced6cd53..000000000000 
--- a/lib/galaxy/model/tool_shed_install/migrate/versions/0015_update_migrate_tools_table.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0113_update_migrate_tools_table.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0016_update_migrate_tools_table_again.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0016_update_migrate_tools_table_again.py deleted file mode 120000 index 7af8a2a1890c..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0016_update_migrate_tools_table_again.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0114_update_migrate_tools_table_again.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/0017_drop_update_available_col_add_tool_shed_status_col.py b/lib/galaxy/model/tool_shed_install/migrate/versions/0017_drop_update_available_col_add_tool_shed_status_col.py deleted file mode 120000 index d874f486173d..000000000000 --- a/lib/galaxy/model/tool_shed_install/migrate/versions/0017_drop_update_available_col_add_tool_shed_status_col.py +++ /dev/null @@ -1 +0,0 @@ -../../../migrate/versions/0116_drop_update_available_col_add_tool_shed_status_col.py \ No newline at end of file diff --git a/lib/galaxy/model/tool_shed_install/migrate/versions/__init__.py b/lib/galaxy/model/tool_shed_install/migrate/versions/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/lib/galaxy/model/migrate/__init__.py b/lib/galaxy/model/triggers/__init__.py similarity index 100% rename from lib/galaxy/model/migrate/__init__.py rename to lib/galaxy/model/triggers/__init__.py diff --git a/lib/galaxy/model/migrate/triggers/history_update_time_field.py b/lib/galaxy/model/triggers/history_update_time_field.py similarity index 98% rename from lib/galaxy/model/migrate/triggers/history_update_time_field.py rename to lib/galaxy/model/triggers/history_update_time_field.py index f25208eeae1d..91315ae82891 100644 --- a/lib/galaxy/model/migrate/triggers/history_update_time_field.py +++ b/lib/galaxy/model/triggers/history_update_time_field.py @@ -2,7 +2,7 @@ Database trigger installation and removal """ -from galaxy.model.migrate.versions.util import execute_statements +from galaxy.model.triggers.utils import execute_statements def install_timestamp_triggers(engine): diff --git a/lib/galaxy/model/migrate/triggers/update_audit_table.py b/lib/galaxy/model/triggers/update_audit_table.py similarity index 98% rename from lib/galaxy/model/migrate/triggers/update_audit_table.py rename to lib/galaxy/model/triggers/update_audit_table.py index 59f69f6f5acf..1f88fe069aef 100644 --- a/lib/galaxy/model/migrate/triggers/update_audit_table.py +++ b/lib/galaxy/model/triggers/update_audit_table.py @@ -1,4 +1,4 @@ -from galaxy.model.migrate.versions.util import execute_statements +from galaxy.model.triggers.utils import execute_statements # function name prefix fn_prefix = "fn_audit_history_by" diff --git a/lib/galaxy/model/triggers/utils.py b/lib/galaxy/model/triggers/utils.py new file mode 100644 index 000000000000..702165c02e6a --- /dev/null +++ b/lib/galaxy/model/triggers/utils.py @@ -0,0 +1,8 @@ +from sqlalchemy import DDL + + +def execute_statements(engine, raw_sql): + statements = raw_sql if isinstance(raw_sql, list) else [raw_sql] + for sql in statements: + cmd = DDL(sql) + cmd.execute(bind=engine) diff --git a/lib/galaxy/structured_app.py b/lib/galaxy/structured_app.py index 31d98c0bce17..cac460811cbd 100644 --- a/lib/galaxy/structured_app.py +++ 
b/lib/galaxy/structured_app.py
@@ -72,7 +72,6 @@ class MinimalToolApp(BasicApp):
 class MinimalApp(BasicSharedApp):
     is_webapp: bool  # is_webapp will be set to true when building WSGI app
-    new_installation: bool
     tag_handler: GalaxyTagHandler
     model: GalaxyModelMapping
     install_model: ModelMapping
@@ -115,7 +114,6 @@ class StructuredApp(MinimalManagerApp):
     """
 
     is_webapp: bool  # is_webapp will be set to true when building WSGI app
-    new_installation: bool
     tag_handler: GalaxyTagHandler
     amqp_internal_connection_obj: Optional[Connection]
     dependency_resolvers_view: DependencyResolversView
diff --git a/manage_db.sh b/manage_db.sh
index b5a185ab5736..080732a20ac5 100755
--- a/manage_db.sh
+++ b/manage_db.sh
@@ -1,15 +1,49 @@
 #!/bin/sh
 
 #######
-# NOTE: To downgrade to a specific version, use something like:
-# sh manage_db.sh downgrade --version=3
+# The purpose of this script is to preserve the legacy interface provided by
+# manage_db.sh, which was a thin wrapper around SQLAlchemy Migrate. Unless you
+# need the legacy interface, you are encouraged to manage migrations of the
+# "galaxy" and "install" databases with run_alembic.sh directly and take
+# advantage of Alembic's command line options.
+#
+# Use this script to upgrade or downgrade your database.
+# Database options: galaxy (default), install, tool_shed
+# To pass a galaxy config file, you may use `-c|--config|--config-file your-config-file`
+
+# To upgrade or downgrade to some version X:
+# sh manage_db.sh [upgrade|downgrade] --version=X [tool_shed|install|galaxy]
+#
+# You may also skip the version argument when upgrading, in which case the database
+# will be upgraded to the latest version.
+#
+# Example 1: upgrade "galaxy" database to version "abc123" using the default config:
+# sh manage_db.sh upgrade --version=abc123
+#
+# Example 2: downgrade "install" database to version "xyz789" passing config file "mygalaxy.yml":
+# sh manage_db.sh downgrade --version=xyz789 -c mygalaxy.yml install
+#
+# Example 3: upgrade "galaxy" database to the latest version using the default config:
+# sh manage_db.sh upgrade
+#
+# (Note: Tool Shed migrations use the legacy migrations system, so we check the
+# last argument (the database) to invoke the appropriate script. Therefore, if
+# you don't specify the database (galaxy is used by default) and pass a config
+# file, your config file should not be named `tool_shed`.)
 #######
 
+ALEMBIC_CONFIG='lib/galaxy/model/migrations/alembic.ini'
+
 cd `dirname $0`
 
 . 
./scripts/common_startup_functions.sh setup_python -find lib/galaxy/model/migrate/versions -name '*.pyc' -delete -python ./scripts/manage_db.py $@ +for i; do :; done +if [ "$i" = "tool_shed" ]; then + python ./scripts/migrate_toolshed_db.py "$@" tool_shed +else + find lib/galaxy/model/migrations/alembic -name '*.pyc' -delete + python ./scripts/manage_db_adapter.py --alembic-config "$ALEMBIC_CONFIG" "$@" +fi diff --git a/packages/data/MANIFEST.in b/packages/data/MANIFEST.in index e2e6d43c155a..a5e6acd0809e 100644 --- a/packages/data/MANIFEST.in +++ b/packages/data/MANIFEST.in @@ -2,5 +2,5 @@ include *.rst *.txt LICENSE include galaxy/datatypes/set_metadata_tool.xml include galaxy/datatypes/converters/*.xml include galaxy/datatypes/test/* -include galaxy/model/migrate/migrate.cfg -include galaxy/model/tool_shed_install/migrate/migrate.cfg +include galaxy/model/migrations/alembic.ini +include galaxy/model/migrations/alembic/script.py.mako diff --git a/packages/data/requirements.txt b/packages/data/requirements.txt index 1ec7811f1ffd..2b559ebf2a0e 100644 --- a/packages/data/requirements.txt +++ b/packages/data/requirements.txt @@ -2,6 +2,7 @@ galaxy-files galaxy-objectstore galaxy-sequence-utils galaxy-util[template] +alembic==1.7.4; python_version >= "3.6" bdbag bx-python contextvars; python_version >= "3.6" and python_version < "3.7" @@ -15,7 +16,6 @@ pydantic pysam social-auth-core[openidconnect]==4.0.3 SQLAlchemy>=1.4.25,<2 -sqlalchemy-migrate tifffile<=2020.9.3 # Last version compatible with python 3.6 typing-extensions WebOb diff --git a/packages/data/setup.py b/packages/data/setup.py index 8e37013fb5ec..84e39ba18808 100644 --- a/packages/data/setup.py +++ b/packages/data/setup.py @@ -30,28 +30,28 @@ def get_var(var_name): TEST_DIR = "tests" PACKAGES = [ - "galaxy", - "galaxy.datatypes", - "galaxy.datatypes.converters", - "galaxy.datatypes.dataproviders", - "galaxy.datatypes.display_applications", - "galaxy.datatypes.util", - "galaxy.datatypes.test", - "galaxy.model", - "galaxy.model.dataset_collections", - "galaxy.model.dataset_collections.types", - "galaxy.model.migrate", - "galaxy.model.migrate.triggers", - "galaxy.model.migrate.versions", - "galaxy.model.orm", - "galaxy.model.store", - "galaxy.model.tool_shed_install", - "galaxy.model.tool_shed_install.migrate", - "galaxy.model.tool_shed_install.migrate.versions", - "galaxy.model.unittest_utils", - "galaxy.model.view", - "galaxy.quota", - "galaxy.security", + 'galaxy', + 'galaxy.datatypes', + 'galaxy.datatypes.converters', + 'galaxy.datatypes.dataproviders', + 'galaxy.datatypes.display_applications', + 'galaxy.datatypes.util', + 'galaxy.datatypes.test', + 'galaxy.model', + 'galaxy.model.dataset_collections', + 'galaxy.model.dataset_collections.types', + 'galaxy.model.migrations', + 'galaxy.model.migrations.alembic', + 'galaxy.model.migrations.alembic.versions_gxy', + 'galaxy.model.migrations.alembic.versions_tsi', + 'galaxy.model.orm', + 'galaxy.model.store', + 'galaxy.model.tool_shed_install', + 'galaxy.model.triggers', + 'galaxy.model.unittest_utils', + 'galaxy.model.view', + 'galaxy.quota', + 'galaxy.security', ] ENTRY_POINTS = """ [console_scripts] diff --git a/packages/test.sh b/packages/test.sh index 5d7681e03fcd..a9a74cfa57ac 100755 --- a/packages/test.sh +++ b/packages/test.sh @@ -51,7 +51,9 @@ for ((i=0; i<${#PACKAGE_DIRS[@]}; i++)); do pip install -r test-requirements.txt - pytest --doctest-modules galaxy tests + # Prevent execution of alembic/env.py at test collection stage (alembic.context not set) + 
unit_extra='--doctest-modules --ignore galaxy/model/migrations/alembic' + pytest $unit_extra galaxy tests make mypy cd .. done diff --git a/pyproject.toml b/pyproject.toml index c1125a8c9f49..462eb2d395cf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,7 @@ url = "https://wheels.galaxyproject.org/simple" [tool.poetry.dependencies] a2wsgi = "*" aiofiles = "*" +alembic = "*" Babel = "*" bdbag = "*" Beaker = "1.11.0" @@ -91,7 +92,7 @@ Routes = "*" social-auth-core = {version = "==4.0.3", extras = ["openidconnect"]} sortedcontainers = "*" SQLAlchemy = ">=1.4.25,<2" -sqlalchemy-migrate = "*" +sqlalchemy-migrate = "*" # required to support tool shed sqlitedict = "*" sqlparse = "*" starlette = "*" diff --git a/run_alembic.sh b/run_alembic.sh new file mode 100755 index 000000000000..3349bbe63c36 --- /dev/null +++ b/run_alembic.sh @@ -0,0 +1,53 @@ +#!/bin/sh + +####### +# Use this script to manage Galaxy and Tool Shed Install migrations. +# (Use the legacy manage_db.sh script to manage Tool Shed migrations.) +# +# NOTE: If your database is empty OR is not under Alembic version control, +# use create_db.sh instead. +# +# We use branch labels to distinguish between the galaxy and the tool_shed_install models, +# so in most cases you'll need to identify the branch to which your command should be applied. +# Use these identifiers: `gxy` for galaxy, and `tsi` for tool_shed_install. +# +# To create a revision for galaxy: +# ./run_alembic.sh revision --head=gxy@head -m "your description" +# +# To create a revision for tool_shed_install: +# ./run_alembic.sh revision --head=tsi@head -m "your description" +# +# To upgrade: +# ./run_alembic.sh upgrade gxy@head # upgrade gxy to head revision +# ./run_alembic.sh upgrade gxy@+1 # upgrade gxy to 1 revision above current +# ./run_alembic.sh upgrade [revision identifier] # upgrade gxy to a specific revision +# ./run_alembic.sh upgrade [revision identifier]+1 # upgrade gxy to 1 revision above specific revision +# ./run_alembic.sh upgrade heads # upgrade gxy and tsi to head revisions +# +# To downgrade: +# ./run_alembic.sh downgrade gxy@base # downgrade gxy to base (empty db with empty alembic table) +# ./run_alembic.sh downgrade gxy@-1 # downgrade gxy to 1 revision below current +# ./run_alembic.sh downgrade [revision identifier] # downgrade gxy to a specific revision +# ./run_alembic.sh downgrade [revision identifier]-1 # downgrade gxy to 1 revision below specific revision +# +# To pass a galaxy config file, use `--galaxy-config` +# +# You may also override the galaxy database url and/or the +# tool shed install database url, as well as the database_template +# and database_encoding configuration options with env vars: +# GALAXY_CONFIG_OVERRIDE_DATABASE_CONNECTION=my-db-url ./run_alembic.sh ... +# GALAXY_INSTALL_CONFIG_OVERRIDE_DATABASE_CONNECTION=my-other-db-url ./run_alembic.sh ... +# +# For more options, see Alembic's documentation at https://alembic.sqlalchemy.org +####### + +ALEMBIC_CONFIG='lib/galaxy/model/migrations/alembic.ini' + +cd `dirname $0` + +. 
./scripts/common_startup_functions.sh + +setup_python + +find lib/galaxy/model/migrations/alembic -name '*.pyc' -delete +python ./scripts/migrate_db.py --config "$ALEMBIC_CONFIG" "$@" diff --git a/run_tests.sh b/run_tests.sh index c146f3b388b0..5c407b52cbd4 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -503,7 +503,7 @@ do -u|-unit|--unit) report_file="run_unit_tests.html" test_script="pytest" - unit_extra='--doctest-modules --ignore lib/galaxy/web/proxy/js/node_modules/ --ignore lib/tool_shed/webapp/controllers --ignore lib/galaxy/jobs/runners/chronos.py --ignore lib/tool_shed/webapp/model/migrate --ignore lib/galaxy/tools/bundled --ignore lib/galaxy_test --ignore lib/tool_shed/test' + unit_extra='--doctest-modules --ignore lib/galaxy/web/proxy/js/node_modules/ --ignore lib/tool_shed/webapp/controllers --ignore lib/galaxy/jobs/runners/chronos.py --ignore lib/tool_shed/webapp/model/migrate --ignore lib/galaxy/tools/bundled --ignore lib/galaxy_test --ignore lib/tool_shed/test --ignore lib/galaxy/model/migrations/alembic' if [ $# -gt 1 ]; then unit_extra="$unit_extra $2" shift 2 diff --git a/scripts/check_model.py b/scripts/check_model.py index 58819421495b..3f411963270a 100644 --- a/scripts/check_model.py +++ b/scripts/check_model.py @@ -17,6 +17,7 @@ from galaxy.model import mapping from galaxy.model.orm.scripts import get_config +from galaxy.model.tool_shed_install import mapping as tsi_mapping IndexTuple = namedtuple("IndexTuple", "table column_names") @@ -37,17 +38,22 @@ def load_indexes(metadata): indexes[index_tuple] = index.name return indexes - # load metadata from mapping.py + # load metadata from mapping.py and tool_shed_install/mapping.py metadata = mapping.metadata mapping_indexes = load_indexes(metadata) + tsi_metadata = tsi_mapping.metadata + tsi_mapping_indexes = load_indexes(tsi_metadata) + # create EMPTY metadata, then load from database db_url = get_config(sys.argv)["db_url"] metadata = MetaData(bind=create_engine(db_url)) metadata.reflect() indexes_in_db = load_indexes(metadata) - missing_indexes = set(mapping_indexes.keys()) - set(indexes_in_db.keys()) + all_indexes = set(mapping_indexes.keys()) | set(tsi_mapping_indexes.keys()) + + missing_indexes = all_indexes - set(indexes_in_db.keys()) if missing_indexes: return [(mapping_indexes[index], index.table, index.column_names) for index in missing_indexes] diff --git a/scripts/create_db.py b/scripts/create_db.py index c3e986c8cfbe..c9d5b750084e 100755 --- a/scripts/create_db.py +++ b/scripts/create_db.py @@ -1,19 +1,12 @@ """ -Creates the initial galaxy database schema using the settings defined in -config/galaxy.ini. - -This script is also wrapped by create_db.sh. - -.. note: pass '-c /location/to/your_config.ini' for non-standard ini file -locations. - -.. note: if no database_connection is set in galaxy.ini, the default, sqlite -database will be constructed. - Using the database_file setting in galaxy.ini will create the file at the - settings location (??) - -.. seealso: galaxy.ini, specifically the settings: database_connection and -database file +This script retrieves relevant configuration values and verifies the state of +the Galaxy and Tool Shed Install database(s). +There may be one combined database (galaxy and tool shed install) or two +separate databases. +If the database does not exist or is empty, it will be created and initialized. +(See inline comments in lib/galaxy/model/migrations/__init__.py for details on +how other database states are handled). +It is wrapped by create_db.sh (see that file for usage). 
""" import logging import os.path @@ -21,23 +14,16 @@ sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, "lib"))) -from galaxy.model.migrate.check import create_or_verify_database as create_db -from galaxy.model.orm.scripts import get_config -from galaxy.model.tool_shed_install.migrate.check import create_or_verify_database as create_install_db -from tool_shed.webapp.model.migrate.check import create_or_verify_database as create_tool_shed_db +from galaxy.model.migrations import verify_databases_via_script +from galaxy.model.migrations.scripts import get_configuration logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) def invoke_create(): - config = get_config(sys.argv) - if config["database"] == "galaxy": - create_db(config["db_url"], config["config_file"], map_install_models=not config["install_database_connection"]) - elif config["database"] == "tool_shed": - create_tool_shed_db(config["db_url"]) - elif config["database"] == "install": - create_install_db(config["db_url"]) + gxy_config, tsi_config, is_auto_migrate = get_configuration(sys.argv, os.getcwd()) + verify_databases_via_script(gxy_config, tsi_config, is_auto_migrate) if __name__ == "__main__": diff --git a/scripts/create_toolshed_db.py b/scripts/create_toolshed_db.py new file mode 100755 index 000000000000..aee1fe839f0d --- /dev/null +++ b/scripts/create_toolshed_db.py @@ -0,0 +1,29 @@ +""" +Creates the initial tool shed database schema using the settings defined in config/tool_shed.yml. + +Note: pass '-c /location/to/your_config.yml' for non-standard ini file locations. + +Note: if no database_connection is set in tool_shed.yml, the default, sqlite database will be constructed. + +This script is also wrapped by create_ts_db.sh. +""" +import logging +import os.path +import sys + +sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, "lib"))) + +from galaxy.model.orm.scripts import get_config +from tool_shed.webapp.model.migrate.check import create_or_verify_database as create_tool_shed_db + +logging.basicConfig(level=logging.DEBUG) +log = logging.getLogger(__name__) + + +def invoke_create(): + config = get_config(sys.argv) + create_tool_shed_db(config["db_url"]) + + +if __name__ == "__main__": + invoke_create() diff --git a/scripts/manage_db.py b/scripts/manage_db.py index 4bbf2eb4abf4..f68580ee2c6e 100644 --- a/scripts/manage_db.py +++ b/scripts/manage_db.py @@ -1,22 +1,58 @@ -""" This script parses Galaxy or Tool Shed config file for database connection -and then delegates to sqlalchemy_migrate shell main function in -migrate.versioning.shell. """ +""" +This script is legacy and should not be used directly. It is intended to be +used by the ansible galaxy and toolshed roles. For managing the database, please +consult manage_db.sh. +""" import logging import os.path import sys -from migrate.versioning.shell import main - sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, "lib"))) - -from galaxy.model.orm.scripts import get_config +from galaxy.model.migrations.scripts import LegacyManageDb logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) -def invoke_migrate_main(): - # Migrate has its own args, so cannot use argparse +def run(): + """ + If the target database is 'tool_shed', delegate to sqlalchemy migrate. + Otherwise, handle with Alembic. 
+ """ + if sys.argv[-1] == "tool_shed": + _run_sqlalchemy_migrate_on_toolshed() + else: + arg = _get_command_argument() + lmdb = LegacyManageDb() + if arg == "version": + result = lmdb.get_gxy_version() + elif arg == "db_version": + result = lmdb.get_gxy_db_version() + else: + result = lmdb.run_upgrade() + if result: + print(result) + + +def _get_command_argument(): + """ + If last argument is a valid command, pop and return it; otherwise raise exception. + """ + arg = sys.argv[-1] + if arg in ["version", "db_version", "upgrade"]: + return arg + else: + raise Exception("Invalid command argument; should be: 'version', 'db_version', or 'upgrade'") + + +def _run_sqlalchemy_migrate_on_toolshed(): + # This is the only case when we use SQLAlchemy Migrate. + # This intentionally duplicates the code in `migrate_toolshed_db.py`. + # The dependency on `migrate` should be removed prior to the move to SQLAlchemy 2.0. + from migrate.versioning.shell import main + + from galaxy.model.orm.scripts import get_config + config = get_config(sys.argv, use_argparse=False, cwd=os.getcwd()) db_url = config["db_url"] repo = config["repo"] @@ -25,4 +61,4 @@ def invoke_migrate_main(): if __name__ == "__main__": - invoke_migrate_main() + run() diff --git a/scripts/manage_db_adapter.py b/scripts/manage_db_adapter.py new file mode 100644 index 000000000000..8026c54bc9af --- /dev/null +++ b/scripts/manage_db_adapter.py @@ -0,0 +1,38 @@ +""" +This script is intended to be invoked by the legacy manage_db.sh script. +It translates the arguments supplied to manage_db.sh into the format used +by migrate_db.py. + +INPUT: | OUTPUT: +---------------------------------------------------------- +upgrade --version=foo | upgrade foo +upgrade --version foo | upgrade foo +upgrade | upgrade heads (if using a combined db for galaxy and install) +upgrade | upgrade gxy@head (if using separate dbs for galaxy and install) +upgrade install | upgrade tsi@head +upgrade --version=bar install | upgrade bar +upgrade -c path-to-galaxy.yml | upgrade --galaxy-config path-to-galaxy.yml gxy@head + +The converted sys.argv will include `-c path-to-alembic.ini`. +The optional `-c` argument name is renamed to `--galaxy-config`. +""" + +import os +import sys + +sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, "lib"))) + +from galaxy.model.migrations.scripts import ( + invoke_alembic, + LegacyScripts, +) + + +def run(): + ls = LegacyScripts(sys.argv, os.getcwd()) + ls.run() + invoke_alembic() + + +if __name__ == "__main__": + run() diff --git a/scripts/migrate_db.py b/scripts/migrate_db.py new file mode 100755 index 000000000000..78f976ff1580 --- /dev/null +++ b/scripts/migrate_db.py @@ -0,0 +1,29 @@ +""" +This script retrieves relevant configuration values and invokes +the Alembic console runner. +It is wrapped by run_alembic.sh (see that file for usage). 
+""" +import logging +import os.path +import sys + +sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, "lib"))) + +from galaxy.model.migrations.scripts import ( + add_db_urls_to_command_arguments, + get_configuration, + invoke_alembic, +) + +logging.basicConfig(level=logging.DEBUG) +log = logging.getLogger(__name__) + + +def run(): + gxy_config, tsi_config, _ = get_configuration(sys.argv, os.getcwd()) + add_db_urls_to_command_arguments(sys.argv, gxy_config.url, tsi_config.url) + invoke_alembic() + + +if __name__ == "__main__": + run() diff --git a/scripts/migrate_toolshed_db.py b/scripts/migrate_toolshed_db.py new file mode 100755 index 000000000000..26052126ed6a --- /dev/null +++ b/scripts/migrate_toolshed_db.py @@ -0,0 +1,31 @@ +""" +This script parses the Tool Shed config file for database connection +and then delegates to sqlalchemy_migrate shell main function in +migrate.versioning.shell. +It is wrapped by manage_db.sh (see that file for usage). +""" +import logging +import os.path +import sys + +from migrate.versioning.shell import main + +sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, "lib"))) + +from galaxy.model.orm.scripts import get_config + +logging.basicConfig(level=logging.DEBUG) +log = logging.getLogger(__name__) + + +def invoke_migrate_main(): + # Migrate has its own args, so cannot use argparse + config = get_config(sys.argv, use_argparse=False, cwd=os.getcwd()) + db_url = config["db_url"] + repo = config["repo"] + + main(repository=repo, url=db_url) + + +if __name__ == "__main__": + invoke_migrate_main() diff --git a/scripts/tool_shed/bootstrap_tool_shed/bootstrap_tool_shed.sh b/scripts/tool_shed/bootstrap_tool_shed/bootstrap_tool_shed.sh index ce7005f9fc14..93e830c45483 100755 --- a/scripts/tool_shed/bootstrap_tool_shed/bootstrap_tool_shed.sh +++ b/scripts/tool_shed/bootstrap_tool_shed/bootstrap_tool_shed.sh @@ -27,7 +27,7 @@ fi echo "Bootstrapping from tool shed at $tool_shed." echo -n "Creating database... " -python scripts/create_db.py tool_shed +python scripts/create_toolshed_db.py tool_shed if [ $? -eq 0 ] ; then echo "done." diff --git a/test/integration/test_scripts.py b/test/integration/test_scripts.py index 4837e2ec0acf..7eb67daec30f 100644 --- a/test/integration/test_scripts.py +++ b/test/integration/test_scripts.py @@ -151,8 +151,8 @@ def test_secret_decoder_ring(self): assert output.strip() == "1" def test_database_scripts(self): - self._scripts_check_argparse_help("create_db.py") - self._scripts_check_argparse_help("manage_db.py") + self._scripts_check_argparse_help("create_toolshed_db.py") + self._scripts_check_argparse_help("migrate_toolshed_db.py") # TODO: test creating a smaller database - e.g. tool install database based on fresh # config file. 
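Taken together, the scripts above funnel legacy `manage_db.sh`-style arguments into the form Alembic expects. The following is a minimal standalone sketch of that translation, not the actual `LegacyScripts` code; `convert_legacy_argv` and its `one_database` flag are illustrative names, and the real adapter additionally renames `-c` to `--galaxy-config` and injects the database urls through Alembic's `-x` options:

```python
import re
from typing import List


def convert_legacy_argv(argv: List[str], one_database: bool = True) -> List[str]:
    """Rewrite legacy manage_db.sh-style arguments into Alembic's format (illustrative sketch)."""
    argv = list(argv)  # work on a copy

    # A trailing database name selects the migration branch; "galaxy" is the default.
    database = "galaxy"
    if argv and argv[-1] in ("galaxy", "install"):
        database = argv.pop()

    # Collapse `--version X` / `--version=X` into a bare revision identifier.
    found_version = False
    if "--version" in argv:
        argv.remove("--version")  # the next token is already the bare revision
        found_version = True
    else:
        for i, arg in enumerate(argv):
            m = re.match(r"--version=(\w+)", arg)
            if m:
                argv[i] = m.group(1)
                found_version = True
                break

    # With no explicit revision, an upgrade targets the appropriate head(s).
    if not found_version:
        if "upgrade" not in argv:
            raise ValueError("a `--version` argument is required unless upgrading")
        if one_database:
            argv.append("heads")  # combined database: upgrade gxy and tsi together
        elif database == "install":
            argv.append("tsi@head")
        else:
            argv.append("gxy@head")
    return argv


print(convert_legacy_argv(["upgrade", "--version=abc123", "install"]))  # ['upgrade', 'abc123']
print(convert_legacy_argv(["upgrade"], one_database=False))  # ['upgrade', 'gxy@head']
```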
diff --git a/lib/galaxy/model/migrate/triggers/__init__.py b/test/unit/data/model/migrations/__init__.py
similarity index 100%
rename from lib/galaxy/model/migrate/triggers/__init__.py
rename to test/unit/data/model/migrations/__init__.py
diff --git a/test/unit/data/model/migrations/common.py b/test/unit/data/model/migrations/common.py
new file mode 100644
index 000000000000..1e5d418a92f1
--- /dev/null
+++ b/test/unit/data/model/migrations/common.py
@@ -0,0 +1,115 @@
+import os
+import uuid
+from contextlib import contextmanager
+from typing import (
+    Callable,
+    Iterator,
+    NewType,
+    Optional,
+)
+
+import pytest
+from sqlalchemy import create_engine
+from sqlalchemy.engine import (
+    Engine,
+    make_url,
+)
+from sqlalchemy.sql.compiler import IdentifierPreparer
+
+from galaxy.model.database_utils import create_database
+
+DbUrl = NewType("DbUrl", str)
+
+# Fixture and helper functions used to generate urls for postgresql and sqlite databases
+
+
+@pytest.fixture
+def url_factory(tmp_directory: str) -> Callable[[], DbUrl]:
+    """
+    Return a factory function that produces a database url with a unique database name.
+    If _get_connection_url() returns a value, the database is postgresql; otherwise, it's
+    sqlite (referring to a location within the /tmp directory).
+    """
+
+    def url() -> DbUrl:
+        database = _generate_unique_database_name()
+        connection_url = _get_connection_url()
+        if connection_url:
+            return _make_postgres_db_url(DbUrl(connection_url), database)
+        else:
+            return _make_sqlite_db_url(tmp_directory, database)
+
+    return url
+
+
+@contextmanager
+def disposing_engine(url: DbUrl) -> Iterator[Engine]:
+    """Context manager for an engine that disposes of its connection pool on exit."""
+    engine = create_engine(url)
+    try:
+        yield engine
+    finally:
+        engine.dispose()
+
+
+@contextmanager
+def create_and_drop_database(url: DbUrl) -> Iterator[None]:
+    """
+    Context manager that creates a database. If the database is postgresql, it is dropped on exit;
+    a sqlite database should be removed automatically by tempfile.
+    """
+    try:
+        create_database(url)
+        yield
+    finally:
+        if _is_postgres(url):
+            _drop_postgres_database(url)
+
+
+@contextmanager
+def drop_database(url: DbUrl) -> Iterator[None]:
+    """
+    Context manager that ensures a postgres database identified by url is dropped on exit;
+    a sqlite database should be removed automatically by tempfile.
+    """
+    try:
+        yield
+    finally:
+        if _is_postgres(url):
+            _drop_postgres_database(url)
+
+
+def _generate_unique_database_name() -> str:
+    return f"galaxytest_{uuid.uuid4().hex}"
+
+
+def _get_connection_url() -> Optional[str]:
+    return os.environ.get("GALAXY_TEST_DBURI")
+
+
+def _is_postgres(url: DbUrl) -> bool:
+    return url.startswith("postgres")
+
+
+def _make_sqlite_db_url(tmpdir: str, database: str) -> DbUrl:
+    path = os.path.join(tmpdir, database)
+    return DbUrl(f"sqlite:///{path}")
+
+
+def _make_postgres_db_url(connection_url: DbUrl, database: str) -> DbUrl:
+    url = make_url(connection_url)
+    url = url.set(database=database)
+    return DbUrl(str(url))
+
+
+def _drop_postgres_database(url: DbUrl) -> None:
+    db_url = make_url(url)
+    database = db_url.database
+    connection_url = db_url.set(database="postgres")
+    engine = create_engine(connection_url, isolation_level="AUTOCOMMIT")
+    preparer = IdentifierPreparer(engine.dialect)
+    database = preparer.quote(database)
+    stmt = f"DROP DATABASE IF EXISTS {database}"
+    with engine.connect() as conn:
+        conn.execute(stmt)
+    engine.dispose()
diff --git a/test/unit/data/model/migrations/conftest.py b/test/unit/data/model/migrations/conftest.py
new file mode 100644
index 000000000000..6e2a67e95565
--- /dev/null
+++ b/test/unit/data/model/migrations/conftest.py
@@ -0,0 +1,307 @@
+import tempfile
+
+import pytest
+import sqlalchemy as sa
+
+# Helper fixtures
+
+
+@pytest.fixture(scope="module")
+def tmp_directory():
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        yield tmp_dir
+
+
+# Fixtures: metadata containing one or more tables and representing database state.
+# Used to load a database with a given state.
+# Each state has 3 versions, distinguished by suffix:
+# - gxy (galaxy database that holds gxy* and migration version tables)
+# - tsi (tool_shed_install database that holds tsi* and migration version tables)
+# - combined (combined database that holds gxy*, tsi*, and migration version tables)
+#
+# The following states are represented:
+#
+# State 1: Non-empty database, no version table.
+#          (Most ancient state)
+# State 2: SQLAlchemy Migrate version table added.
+#          (Oldest state versioned with SQLAlchemy Migrate)
+# State 3: New table added.
+#          (Last (most recent) state versioned with SQLAlchemy Migrate)
+# State 4: Alembic version table added.
+#          (Oldest state versioned with Alembic)
+# State 5: SQLAlchemy Migrate version table removed.
+#          (Oldest state versioned with Alembic that does not include the SQLAlchemy Migrate version table)
+# State 6: New table added.
+#          (Most recent state versioned with Alembic. This is the current state)
+#
+# Additional edge case: gxy at state3, tsi has no SQLAlchemy Migrate table.
+#
+# (State 0 assumes an empty database, so it needs no state fixtures.)
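To make the state progression above concrete, here is a minimal, self-contained sketch (assuming plain SQLAlchemy and a throwaway in-memory SQLite database; `make_state2_metadata` is an illustrative stand-in for the fixtures below) of how one of these states is materialized so migration code can be exercised against it:

```python
import sqlalchemy as sa


def make_state2_metadata() -> sa.MetaData:
    # State 2: one gxy table plus the SQLAlchemy Migrate version table.
    metadata = sa.MetaData()
    sa.Table("gxy_table1", metadata, sa.Column("id", sa.Integer, primary_key=True))
    sa.Table(
        "migrate_version",
        metadata,
        sa.Column("repository_id", sa.String(250), primary_key=True),
        sa.Column("repository_path", sa.Text),
        sa.Column("version", sa.Integer),
    )
    return metadata


engine = sa.create_engine("sqlite://")  # throwaway in-memory database
make_state2_metadata().create_all(bind=engine)  # load the database with state 2
print(sa.inspect(engine).get_table_names())  # ['gxy_table1', 'migrate_version']
engine.dispose()
```

The fixtures below follow the same pattern: each `metadata_stateN_*` fixture composes per-table factories into a `MetaData` object describing one historical database state.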
+ + +# state 1 +@pytest.fixture +def metadata_state1_gxy(gxy_table1): + metadata = sa.MetaData() + gxy_table1(metadata) + return metadata + + +@pytest.fixture +def metadata_state1_tsi(tsi_table1): + metadata = sa.MetaData() + tsi_table1(metadata) + return metadata + + +@pytest.fixture +def metadata_state1_combined(gxy_table1, tsi_table1): + metadata = sa.MetaData() + gxy_table1(metadata) + tsi_table1(metadata) + return metadata + + +# state 2 +@pytest.fixture +def metadata_state2_gxy(gxy_table1, sqlalchemymigrate_table): + metadata = sa.MetaData() + gxy_table1(metadata) + sqlalchemymigrate_table(metadata) + return metadata + + +@pytest.fixture +def metadata_state2_tsi(tsi_table1, sqlalchemymigrate_table): + metadata = sa.MetaData() + tsi_table1(metadata) + sqlalchemymigrate_table(metadata) + return metadata + + +@pytest.fixture +def metadata_state2_combined(gxy_table1, tsi_table1, sqlalchemymigrate_table): + metadata = sa.MetaData() + gxy_table1(metadata) + tsi_table1(metadata) + sqlalchemymigrate_table(metadata) + return metadata + + +# state 3 +@pytest.fixture +def metadata_state3_gxy(gxy_table1, gxy_table2, sqlalchemymigrate_table): + metadata = sa.MetaData() + gxy_table1(metadata) + gxy_table2(metadata) + sqlalchemymigrate_table(metadata) + return metadata + + +@pytest.fixture +def metadata_state3_tsi(tsi_table1, tsi_table2, sqlalchemymigrate_table): + metadata = sa.MetaData() + tsi_table1(metadata) + tsi_table2(metadata) + sqlalchemymigrate_table(metadata) + return metadata + + +@pytest.fixture +def metadata_state3_combined(gxy_table1, gxy_table2, tsi_table1, tsi_table2, sqlalchemymigrate_table): + metadata = sa.MetaData() + gxy_table1(metadata) + gxy_table2(metadata) + tsi_table1(metadata) + tsi_table2(metadata) + sqlalchemymigrate_table(metadata) + return metadata + + +# state 4 +@pytest.fixture +def metadata_state4_gxy(gxy_table1, gxy_table2, sqlalchemymigrate_table, alembic_table): + metadata = sa.MetaData() + gxy_table1(metadata) + gxy_table2(metadata) + sqlalchemymigrate_table(metadata) + alembic_table(metadata) + return metadata + + +@pytest.fixture +def metadata_state4_tsi(tsi_table1, tsi_table2, sqlalchemymigrate_table, alembic_table): + metadata = sa.MetaData() + tsi_table1(metadata) + tsi_table2(metadata) + sqlalchemymigrate_table(metadata) + alembic_table(metadata) + return metadata + + +@pytest.fixture +def metadata_state4_combined(gxy_table1, gxy_table2, tsi_table1, tsi_table2, sqlalchemymigrate_table, alembic_table): + metadata = sa.MetaData() + gxy_table1(metadata) + gxy_table2(metadata) + tsi_table1(metadata) + tsi_table2(metadata) + sqlalchemymigrate_table(metadata) + alembic_table(metadata) + return metadata + + +# state 5 +@pytest.fixture +def metadata_state5_gxy(gxy_table1, gxy_table2, alembic_table): + metadata = sa.MetaData() + gxy_table1(metadata) + gxy_table2(metadata) + alembic_table(metadata) + return metadata + + +@pytest.fixture +def metadata_state5_tsi(tsi_table1, tsi_table2, alembic_table): + metadata = sa.MetaData() + tsi_table1(metadata) + tsi_table2(metadata) + alembic_table(metadata) + return metadata + + +@pytest.fixture +def metadata_state5_combined(gxy_table1, gxy_table2, tsi_table1, tsi_table2, alembic_table): + metadata = sa.MetaData() + gxy_table1(metadata) + gxy_table2(metadata) + tsi_table1(metadata) + tsi_table2(metadata) + alembic_table(metadata) + return metadata + + +# state 6 +@pytest.fixture +def metadata_state6_gxy(gxy_table1, gxy_table2, gxy_table3, alembic_table): + metadata = sa.MetaData() + gxy_table1(metadata) + 
gxy_table2(metadata)
+    gxy_table3(metadata)
+    alembic_table(metadata)
+    return metadata
+
+
+@pytest.fixture
+def metadata_state6_tsi(tsi_table1, tsi_table2, tsi_table3, alembic_table):
+    metadata = sa.MetaData()
+    tsi_table1(metadata)
+    tsi_table2(metadata)
+    tsi_table3(metadata)
+    alembic_table(metadata)
+    return metadata
+
+
+@pytest.fixture
+def metadata_state6_combined(gxy_table1, gxy_table2, gxy_table3, tsi_table1, tsi_table2, tsi_table3, alembic_table):
+    metadata = sa.MetaData()
+    gxy_table1(metadata)
+    gxy_table2(metadata)
+    gxy_table3(metadata)
+    tsi_table1(metadata)
+    tsi_table2(metadata)
+    tsi_table3(metadata)
+    alembic_table(metadata)
+    return metadata
+
+
+@pytest.fixture
+def metadata_state6_gxy_state3_tsi_no_sam(
+    gxy_table1, gxy_table2, gxy_table3, tsi_table1, tsi_table2, tsi_table3, alembic_table
+):
+    # This does NOT include sqlalchemymigrate_table (sam)
+    metadata = sa.MetaData()
+    gxy_table1(metadata)
+    gxy_table2(metadata)
+    gxy_table3(metadata)
+    tsi_table1(metadata)
+    tsi_table2(metadata)
+    alembic_table(metadata)
+    return metadata
+
+
+# Fixture factories: metadata containing one table each.
+# Used to compose metadata representing database state.
+# (The `_factory` suffix is omitted to keep the code less verbose)
+
+
+@pytest.fixture
+def gxy_table1():
+    def make_table(metadata):
+        return sa.Table("gxy_table1", metadata, sa.Column("id", sa.Integer, primary_key=True))
+
+    return make_table
+
+
+@pytest.fixture
+def gxy_table2():
+    def make_table(metadata):
+        return sa.Table("gxy_table2", metadata, sa.Column("id", sa.Integer, primary_key=True))
+
+    return make_table
+
+
+@pytest.fixture
+def gxy_table3():
+    def make_table(metadata):
+        return sa.Table("gxy_table3", metadata, sa.Column("id", sa.Integer, primary_key=True))
+
+    return make_table
+
+
+@pytest.fixture
+def tsi_table1():
+    def make_table(metadata):
+        return sa.Table("tsi_table1", metadata, sa.Column("id", sa.Integer, primary_key=True))
+
+    return make_table
+
+
+@pytest.fixture
+def tsi_table2():
+    def make_table(metadata):
+        return sa.Table("tsi_table2", metadata, sa.Column("id", sa.Integer, primary_key=True))
+
+    return make_table
+
+
+@pytest.fixture
+def tsi_table3():
+    def make_table(metadata):
+        return sa.Table("tsi_table3", metadata, sa.Column("id", sa.Integer, primary_key=True))
+
+    return make_table
+
+
+@pytest.fixture
+def alembic_table():
+    def make_table(metadata):
+        table = sa.Table("alembic_version", metadata, sa.Column("version_num", sa.String(250), primary_key=True))
+        return table
+
+    return make_table
+
+
+@pytest.fixture
+def sqlalchemymigrate_table():
+    def make_table(metadata):
+        table = sa.Table(
+            "migrate_version",
+            metadata,
+            sa.Column("repository_id", sa.String(250), primary_key=True),
+            sa.Column("repository_path", sa.Text),
+            sa.Column("version", sa.Integer),
+        )
+        return table
+
+    return make_table
diff --git a/test/unit/data/model/migrations/test_migrations.py b/test/unit/data/model/migrations/test_migrations.py
new file mode 100644
index 000000000000..97f2746d3c27
--- /dev/null
+++ b/test/unit/data/model/migrations/test_migrations.py
@@ -0,0 +1,1189 @@
+import os
+from typing import Union
+
+import alembic
+import pytest
+from alembic.config import Config
+from sqlalchemy import MetaData
+
+from galaxy.model import migrations
+from galaxy.model.database_utils import database_exists
+from galaxy.model.migrations import (
+    AlembicManager,
+    DatabaseStateCache,
+    DatabaseStateVerifier,
+    get_last_sqlalchemymigrate_version,
+    GXY,
+    IncorrectVersionError,
+    listify,
+
load_metadata, + NoVersionTableError, + OutdatedDatabaseError, + scripts, + SQLALCHEMYMIGRATE_LAST_VERSION_GXY, + SQLALCHEMYMIGRATE_LAST_VERSION_TSI, + SQLALCHEMYMIGRATE_TABLE, + TSI, + verify_databases, +) +from galaxy.model.migrations.scripts import LegacyManageDb +from .common import ( # noqa: F401 (url_factory is a fixture we have to import explicitly) + create_and_drop_database, + disposing_engine, + drop_database, + url_factory, +) + +# Revision numbers from test versions directories +GXY_REVISION_0 = "62695fac6cc0" # oldest/base +GXY_REVISION_1 = "2e8a580bc79a" +GXY_REVISION_2 = "e02cef55763c" # current/head +TSI_REVISION_0 = "1bceec30363a" # oldest/base +TSI_REVISION_1 = "8364ef1cab05" +TSI_REVISION_2 = "0e28bf2fb7b5" # current/head + + +class TestAlembicManager: + def test_is_at_revision__one_head_one_revision(self, url_factory): # noqa: F811 + # Use case: Check if separate tsi database is at a given revision. + db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + revision = GXY_REVISION_0 + assert not AlembicManagerForTests.is_at_revision(engine, revision) + am = AlembicManagerForTests(engine) + am.stamp_revision(revision) + assert AlembicManagerForTests.is_at_revision(engine, revision) + + def test_is_at_revision__two_heads_one_revision(self, url_factory): # noqa: F811 + # Use case: Check if combined gxy and tsi database is at a given gxy revision. + db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + revision = GXY_REVISION_0 + revisions = [GXY_REVISION_0, TSI_REVISION_0] + assert not AlembicManagerForTests.is_at_revision(engine, revision) + am = AlembicManagerForTests(engine) + am.stamp_revision(revisions) + assert AlembicManagerForTests.is_at_revision(engine, revision) + + def test_is_at_revision__two_heads_two_revisions(self, url_factory): # noqa: F811 + # Use case: Check if combined gxy and tsi database is at given gxy and tsi revisions. 
+ db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + revisions = [GXY_REVISION_0, TSI_REVISION_0] + assert not AlembicManagerForTests.is_at_revision(engine, revisions) + am = AlembicManagerForTests(engine) + am.stamp_revision(revisions) + assert AlembicManagerForTests.is_at_revision(engine, revisions) + + def test_is_up_to_date_single_revision(self, url_factory): # noqa: F811 + db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + model = GXY + am = AlembicManagerForTests(engine) + assert not am.is_up_to_date(model) + am.stamp_revision(GXY_REVISION_1) + assert not am.is_up_to_date(model) + am.stamp_revision(GXY_REVISION_2) + assert am.is_up_to_date(model) + + def test_not_is_up_to_date_wrong_model(self, url_factory): # noqa: F811 + db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + am = AlembicManagerForTests(engine) + assert not am.is_up_to_date(GXY) + assert not am.is_up_to_date(TSI) + am.stamp_revision(GXY_REVISION_2) + assert am.is_up_to_date(GXY) + assert not am.is_up_to_date(TSI) + + def test_is_up_to_date_multiple_revisions(self, url_factory): # noqa: F811 + db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + am = AlembicManagerForTests(engine) + assert not am.is_up_to_date(GXY) # False: no head revisions in database + am.stamp_revision([GXY_REVISION_2, TSI_REVISION_2]) + assert am.is_up_to_date(GXY) # True: both are up-to-date + assert am.is_up_to_date(TSI) # True: both are up-to-date + + def test_is_not_up_to_date_multiple_revisions_both(self, url_factory): # noqa: F811 + db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + am = AlembicManagerForTests(engine) + am.stamp_revision([GXY_REVISION_1, TSI_REVISION_1]) + assert not am.is_up_to_date(GXY) # False: both are not up-to-date + assert not am.is_up_to_date(TSI) # False: both are not up-to-date + + def test_is_not_up_to_date_multiple_revisions_one(self, url_factory): # noqa: F811 + db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + am = AlembicManagerForTests(engine) + am.stamp_revision([GXY_REVISION_2, TSI_REVISION_1]) + assert am.is_up_to_date(GXY) # True + assert not am.is_up_to_date(TSI) # False: only one is up-to-date + + def test_is_under_version_control(self, url_factory): # noqa: F811 + db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + am = AlembicManagerForTests(engine) + assert not am.is_under_version_control(GXY) + assert not am.is_under_version_control(TSI) + am.stamp_revision(GXY_REVISION_0) + assert am.is_under_version_control(GXY) + assert not am.is_under_version_control(TSI) + am.stamp_revision(TSI_REVISION_0) + assert am.is_under_version_control(GXY) + assert am.is_under_version_control(TSI) + + def test_get_revision_raises_error_if_revision_not_found(self, url_factory): # noqa: F811 + db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + am = AlembicManagerForTests(engine) + with pytest.raises(alembic.util.exc.CommandError): + am._get_revision("invalid") + + def test_get_model_db_head(self, url_factory): # noqa: F811 + db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + am = AlembicManagerForTests(engine) + 
revisions = [GXY_REVISION_1, TSI_REVISION_2] + am.stamp_revision(revisions) + db_head = am.get_model_db_head(GXY) + assert db_head == GXY_REVISION_1 # We stamped the db with this GXY revision + + def test_get_model_script_head(self, url_factory): # noqa: F811 + db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + am = AlembicManagerForTests(engine) + script_head = am.get_model_script_head(GXY) + assert script_head == GXY_REVISION_2 # That's the latest GXY revision in our script directory + + +class TestDatabaseStateCache: + def test_is_empty(self, url_factory, metadata_state1_gxy): # noqa: F811 + db_url, metadata = url_factory(), metadata_state1_gxy + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + assert DatabaseStateCache(engine).is_database_empty() + with engine.connect() as conn: + metadata.create_all(bind=conn) + assert not DatabaseStateCache(engine).is_database_empty() + + def test_has_alembic_version_table(self, url_factory, metadata_state4_gxy): # noqa: F811 + db_url, metadata = url_factory(), metadata_state4_gxy + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + assert not DatabaseStateCache(engine).has_alembic_version_table() + with engine.connect() as conn: + metadata.create_all(bind=conn) + assert DatabaseStateCache(engine).has_alembic_version_table() + + def test_has_sqlalchemymigrate_version_table(self, url_factory, metadata_state2_gxy): # noqa: F811 + db_url, metadata = url_factory(), metadata_state2_gxy + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + assert not DatabaseStateCache(engine).has_sqlalchemymigrate_version_table() + with engine.connect() as conn: + metadata.create_all(bind=conn) + assert DatabaseStateCache(engine).has_sqlalchemymigrate_version_table() + + def test_is_last_sqlalchemymigrate_version(self, url_factory, metadata_state2_gxy): # noqa: F811 + db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + load_metadata(metadata_state2_gxy, engine) + load_sqlalchemymigrate_version(db_url, SQLALCHEMYMIGRATE_LAST_VERSION_GXY - 1) + assert not DatabaseStateCache(engine).is_last_sqlalchemymigrate_version( + SQLALCHEMYMIGRATE_LAST_VERSION_GXY + ) + load_sqlalchemymigrate_version(db_url, SQLALCHEMYMIGRATE_LAST_VERSION_GXY) + assert DatabaseStateCache(engine).is_last_sqlalchemymigrate_version(SQLALCHEMYMIGRATE_LAST_VERSION_GXY) + + +# Database fixture tests + + +class TestDatabaseFixtures: + # Verify that database fixtures have the expected state. + # + # The fixtures of the form `db_state#_[gxy|tsi]` are urls that point + # to databases that HAVE BEEN CREATED. Thus, we are not wrapping them here + # in the `create_and_drop_database` context manager: they are wrapped already. 
+ class TestState1: + def test_database_gxy(self, db_state1_gxy, metadata_state1_gxy): + self.verify_state(db_state1_gxy, metadata_state1_gxy) + + def test_database_tsi(self, db_state1_tsi, metadata_state1_tsi): + self.verify_state(db_state1_tsi, metadata_state1_tsi) + + def test_database_combined(self, db_state1_combined, metadata_state1_combined): + self.verify_state(db_state1_combined, metadata_state1_combined) + + def verify_state(self, db_url, metadata): + assert is_metadata_loaded(db_url, metadata) + with disposing_engine(db_url) as engine: + db = DatabaseStateCache(engine) + assert not db.has_sqlalchemymigrate_version_table() + assert not db.has_alembic_version_table() + + class TestState2: + def test_database_gxy(self, db_state2_gxy, metadata_state2_gxy): + self.verify_state(db_state2_gxy, metadata_state2_gxy) + + def test_database_tsi(self, db_state2_tsi, metadata_state2_tsi): + self.verify_state(db_state2_tsi, metadata_state2_tsi) + + def test_database_combined(self, db_state2_combined, metadata_state2_combined): + self.verify_state(db_state2_combined, metadata_state2_combined) + + def verify_state(self, db_url, metadata): + assert is_metadata_loaded(db_url, metadata) + with disposing_engine(db_url) as engine: + db = DatabaseStateCache(engine) + assert db.has_sqlalchemymigrate_version_table() + assert not db.is_last_sqlalchemymigrate_version(SQLALCHEMYMIGRATE_LAST_VERSION_GXY) + assert not db.is_last_sqlalchemymigrate_version(SQLALCHEMYMIGRATE_LAST_VERSION_TSI) + assert not db.has_alembic_version_table() + + class TestState3: + def test_database_gxy(self, db_state3_gxy, metadata_state3_gxy): + self.verify_state(db_state3_gxy, metadata_state3_gxy, SQLALCHEMYMIGRATE_LAST_VERSION_GXY) + + def test_database_tsi(self, db_state3_tsi, metadata_state3_tsi): + self.verify_state(db_state3_tsi, metadata_state3_tsi, SQLALCHEMYMIGRATE_LAST_VERSION_TSI) + + def test_database_combined(self, db_state3_combined, metadata_state3_combined): + self.verify_state(db_state3_combined, metadata_state3_combined, SQLALCHEMYMIGRATE_LAST_VERSION_GXY) + + def verify_state(self, db_url, metadata, last_version): + assert is_metadata_loaded(db_url, metadata) + with disposing_engine(db_url) as engine: + db = DatabaseStateCache(engine) + assert db.has_sqlalchemymigrate_version_table() + assert db.is_last_sqlalchemymigrate_version(last_version) + assert not db.has_alembic_version_table() + + class TestState4: + def test_database_gxy(self, db_state4_gxy, metadata_state4_gxy): + self.verify_state(db_state4_gxy, metadata_state4_gxy, GXY_REVISION_0, SQLALCHEMYMIGRATE_LAST_VERSION_GXY) + + def test_database_tsi(self, db_state4_tsi, metadata_state4_tsi): + self.verify_state(db_state4_tsi, metadata_state4_tsi, TSI_REVISION_0, SQLALCHEMYMIGRATE_LAST_VERSION_TSI) + + def test_database_combined(self, db_state4_combined, metadata_state4_combined): + self.verify_state( + db_state4_combined, + metadata_state4_combined, + [GXY_REVISION_0, TSI_REVISION_0], + SQLALCHEMYMIGRATE_LAST_VERSION_GXY, + ) + + def verify_state(self, db_url, metadata, revision, last_version): + assert is_metadata_loaded(db_url, metadata) + with disposing_engine(db_url) as engine: + db = DatabaseStateCache(engine) + assert db.has_sqlalchemymigrate_version_table() + assert db.is_last_sqlalchemymigrate_version(last_version) + assert db.has_alembic_version_table() + assert AlembicManagerForTests.is_at_revision(engine, revision) + + class TestState5: + def test_database_gxy(self, db_state5_gxy, metadata_state5_gxy): + self.verify_state(db_state5_gxy, 
metadata_state5_gxy, GXY_REVISION_1)
+
+        def test_database_tsi(self, db_state5_tsi, metadata_state5_tsi):
+            self.verify_state(db_state5_tsi, metadata_state5_tsi, TSI_REVISION_1)
+
+        def test_database_combined(self, db_state5_combined, metadata_state5_combined):
+            self.verify_state(db_state5_combined, metadata_state5_combined, [GXY_REVISION_1, TSI_REVISION_1])
+
+        def verify_state(self, db_url, metadata, revision):
+            assert is_metadata_loaded(db_url, metadata)
+            with disposing_engine(db_url) as engine:
+                db = DatabaseStateCache(engine)
+                assert not db.has_sqlalchemymigrate_version_table()
+                assert db.has_alembic_version_table()
+                assert AlembicManagerForTests.is_at_revision(engine, revision)
+
+    class TestState6:
+        def test_database_gxy(self, db_state6_gxy, metadata_state6_gxy):
+            self.verify_state(db_state6_gxy, metadata_state6_gxy, GXY_REVISION_2)
+
+        def test_database_tsi(self, db_state6_tsi, metadata_state6_tsi):
+            self.verify_state(db_state6_tsi, metadata_state6_tsi, TSI_REVISION_2)
+
+        def test_database_combined(self, db_state6_combined, metadata_state6_combined):
+            self.verify_state(db_state6_combined, metadata_state6_combined, [GXY_REVISION_2, TSI_REVISION_2])
+
+        def verify_state(self, db_url, metadata, revision):
+            assert is_metadata_loaded(db_url, metadata)
+            with disposing_engine(db_url) as engine:
+                db = DatabaseStateCache(engine)
+                assert not db.has_sqlalchemymigrate_version_table()
+                assert db.has_alembic_version_table()
+                assert AlembicManagerForTests.is_at_revision(engine, revision)
+
+    class TestState6GxyState3TsiNoSam:
+        def test_database_combined(self, db_state6_gxy_state3_tsi_no_sam, metadata_state6_gxy_state3_tsi_no_sam):
+            db_url = db_state6_gxy_state3_tsi_no_sam
+            metadata = metadata_state6_gxy_state3_tsi_no_sam
+            assert is_metadata_loaded(db_url, metadata)
+            with disposing_engine(db_url) as engine:
+                db = DatabaseStateCache(engine)
+                assert not db.has_sqlalchemymigrate_version_table()
+                assert db.has_alembic_version_table()
+                assert AlembicManagerForTests.is_at_revision(engine, GXY_REVISION_2)
+
+
+def _verify_databases(gxy_engine, tsi_engine=None):
+    verify_databases(gxy_engine, None, None, tsi_engine, None, None, False)
+
+
+class TestDatabaseStates:
+    # Tests of the primary function under different scenarios and database states.
+
+    class TestNoState:
+        # Initial state: database does not exist.
+        # Expect: database created, initialized, versioned w/alembic.
+        # (we use `metadata_state6_{gxy|tsi|combined}` for the final database schema)
+        def test_combined_database(self, url_factory, metadata_state6_combined):  # noqa: F811
+            db_url = url_factory()
+            with drop_database(db_url):
+                assert not database_exists(db_url)
+                with disposing_engine(db_url) as engine:
+                    _verify_databases(engine)
+                    assert database_is_up_to_date(db_url, metadata_state6_combined, GXY)
+                    assert database_is_up_to_date(db_url, metadata_state6_combined, TSI)
+
+        def test_separate_databases(self, url_factory, metadata_state6_gxy, metadata_state6_tsi):  # noqa: F811
+            db1_url, db2_url = url_factory(), url_factory()
+            assert not database_exists(db1_url)
+            assert not database_exists(db2_url)
+            with drop_database(db1_url), drop_database(db2_url):
+                with disposing_engine(db1_url) as engine1, disposing_engine(db2_url) as engine2:
+                    _verify_databases(engine1, engine2)
+                    assert database_is_up_to_date(db1_url, metadata_state6_gxy, GXY)
+                    assert database_is_up_to_date(db2_url, metadata_state6_tsi, TSI)
+
+    class TestState0:
+        # Initial state: database is empty.
+        # Expect: database created, initialized, versioned w/alembic.
+ def test_combined_database(self, url_factory, metadata_state6_combined): # noqa: F811 + db_url = url_factory() + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + assert database_exists(db_url) + assert database_is_empty(db_url) + _verify_databases(engine) + assert database_is_up_to_date(db_url, metadata_state6_combined, GXY) + assert database_is_up_to_date(db_url, metadata_state6_combined, TSI) + + def test_separate_databases(self, url_factory, metadata_state6_gxy, metadata_state6_tsi): # noqa: F811 + db1_url, db2_url = url_factory(), url_factory() + with create_and_drop_database(db1_url), create_and_drop_database(db2_url): + with disposing_engine(db1_url) as engine1, disposing_engine(db2_url) as engine2: + assert database_exists(db1_url) + assert database_exists(db2_url) + assert database_is_empty(db1_url) + assert database_is_empty(db2_url) + _verify_databases(engine1, engine2) + assert database_is_up_to_date(db1_url, metadata_state6_gxy, GXY) + assert database_is_up_to_date(db2_url, metadata_state6_tsi, TSI) + + class TestState1: + # Initial state: non-empty database, no version table. + # Expect: fail with appropriate message. + def test_combined_database(self, db_state1_combined): + with pytest.raises(NoVersionTableError): + with disposing_engine(db_state1_combined) as engine: + _verify_databases(engine) + + def test_separate_databases_gxy_raises_error(self, db_state1_gxy, db_state6_tsi): + with pytest.raises(NoVersionTableError): + with disposing_engine(db_state1_gxy) as engine1, disposing_engine(db_state6_tsi) as engine2: + _verify_databases(engine1, engine2) + + def test_separate_databases_tsi_raises_error(self, db_state6_gxy, db_state1_tsi): + with pytest.raises(NoVersionTableError): + with disposing_engine(db_state6_gxy) as engine1, disposing_engine(db_state1_tsi) as engine2: + _verify_databases(engine1, engine2) + + class TestState2: + # Initial state: non-empty database, SQLAlchemy Migrate version table present; however, + # the stored version is not the latest after which we could transition to Alembic. + # Expect: fail with appropriate message. + def test_combined_database(self, db_state2_combined): + with pytest.raises(IncorrectVersionError): + with disposing_engine(db_state2_combined) as engine: + _verify_databases(engine) + + def test_separate_databases_gxy_raises_error(self, db_state2_gxy, db_state6_tsi): + with pytest.raises(IncorrectVersionError): + with disposing_engine(db_state2_gxy) as engine1, disposing_engine(db_state6_tsi) as engine2: + _verify_databases(engine1, engine2) + + def test_separate_databases_tsi_raises_error(self, db_state6_gxy, db_state2_tsi): + with pytest.raises(IncorrectVersionError): + with disposing_engine(db_state6_gxy) as engine1, disposing_engine(db_state2_tsi) as engine2: + _verify_databases(engine1, engine2) + + class TestState3: + # Initial state: non-empty database, SQLAlchemy Migrate version table contains latest version + # under SQLAlchemy Migrate. + # Expect: + # a) auto-migrate enabled: alembic version table added, database upgraded to current version. + # b) auto-migrate disabled: fail with appropriate message. 
+ def test_combined_database_automigrate( + self, + db_state3_combined, + metadata_state6_combined, + set_automigrate, + ): + db_url = db_state3_combined + with disposing_engine(db_state3_combined) as engine: + _verify_databases(engine) + assert database_is_up_to_date(db_url, metadata_state6_combined, GXY) + assert database_is_up_to_date(db_url, metadata_state6_combined, TSI) + + def test_separate_databases_automigrate( + self, + db_state3_gxy, + db_state3_tsi, + metadata_state6_gxy, + metadata_state6_tsi, + set_automigrate, + ): + db1_url, db2_url = db_state3_gxy, db_state3_tsi + with disposing_engine(db1_url) as engine1, disposing_engine(db2_url) as engine2: + _verify_databases(engine1, engine2) + assert database_is_up_to_date(db1_url, metadata_state6_gxy, GXY) + assert database_is_up_to_date(db2_url, metadata_state6_tsi, TSI) + + def test_combined_database_no_automigrate(self, db_state3_combined): + with pytest.raises(OutdatedDatabaseError): + with disposing_engine(db_state3_combined) as engine: + _verify_databases(engine) + + def test_separate_databases_no_automigrate_gxy_raises_error(self, db_state3_gxy, db_state6_tsi): + with pytest.raises(OutdatedDatabaseError): + with disposing_engine(db_state3_gxy) as engine1, disposing_engine(db_state6_tsi) as engine2: + _verify_databases(engine1, engine2) + + def test_separate_databases_no_automigrate_tsi_raises_error(self, db_state6_gxy, db_state3_tsi): + with pytest.raises(OutdatedDatabaseError): + with disposing_engine(db_state6_gxy) as engine1, disposing_engine(db_state3_tsi) as engine2: + _verify_databases(engine1, engine2) + + class TestState4: + # Initial state: non-empty database, SQLAlchemy Migrate version table present, Alembic version table present. + # Oldest Alembic revision. + # Expect: + # a) auto-migrate enabled: database upgraded to current version. + # b) auto-migrate disabled: fail with appropriate message. 
+ def test_combined_database_automigrate( + self, + db_state4_combined, + metadata_state6_combined, + set_automigrate, + ): + db_url = db_state4_combined + with disposing_engine(db_url) as engine: + _verify_databases(engine) + assert database_is_up_to_date(db_url, metadata_state6_combined, GXY) + assert database_is_up_to_date(db_url, metadata_state6_combined, TSI) + + def test_separate_databases_automigrate( + self, + db_state4_gxy, + db_state4_tsi, + metadata_state6_gxy, + metadata_state6_tsi, + set_automigrate, + ): + db1_url, db2_url = db_state4_gxy, db_state4_tsi + with disposing_engine(db1_url) as engine1, disposing_engine(db2_url) as engine2: + _verify_databases(engine1, engine2) + assert database_is_up_to_date(db1_url, metadata_state6_gxy, GXY) + assert database_is_up_to_date(db2_url, metadata_state6_tsi, TSI) + + def test_combined_database_no_automigrate(self, db_state4_combined): + with pytest.raises(OutdatedDatabaseError): + with disposing_engine(db_state4_combined) as engine: + _verify_databases(engine) + + def test_separate_databases_no_automigrate_gxy_raises_error(self, db_state4_gxy, db_state6_tsi): + with pytest.raises(OutdatedDatabaseError): + with disposing_engine(db_state4_gxy) as engine1, disposing_engine(db_state6_tsi) as engine2: + _verify_databases(engine1, engine2) + + def test_separate_databases_no_automigrate_tsi_raises_error(self, db_state6_gxy, db_state4_tsi): + with pytest.raises(OutdatedDatabaseError): + with disposing_engine(db_state6_gxy) as engine1, disposing_engine(db_state4_tsi) as engine2: + _verify_databases(engine1, engine2) + + class TestState5: + # Initial state: non-empty database, Alembic version table present. + # Oldest Alembic revision that does not include SQLAlchemy Migrate version table. + # Expect: + # a) auto-migrate enabled: database upgraded to current version. + # b) auto-migrate disabled: fail with appropriate message. 
+        def test_combined_database_automigrate(self, db_state5_combined, metadata_state6_combined, set_automigrate):
+            db_url = db_state5_combined
+            with disposing_engine(db_url) as engine:
+                _verify_databases(engine)
+                assert database_is_up_to_date(db_url, metadata_state6_combined, GXY)
+                assert database_is_up_to_date(db_url, metadata_state6_combined, TSI)
+
+        def test_separate_databases_automigrate(
+            self, db_state5_gxy, db_state5_tsi, metadata_state6_gxy, metadata_state6_tsi, set_automigrate
+        ):
+            db1_url, db2_url = db_state5_gxy, db_state5_tsi
+            with disposing_engine(db1_url) as engine1, disposing_engine(db2_url) as engine2:
+                _verify_databases(engine1, engine2)
+                assert database_is_up_to_date(db1_url, metadata_state6_gxy, GXY)
+                assert database_is_up_to_date(db2_url, metadata_state6_tsi, TSI)
+
+        def test_combined_database_no_automigrate(self, db_state5_combined):
+            with pytest.raises(OutdatedDatabaseError):
+                with disposing_engine(db_state5_combined) as engine:
+                    _verify_databases(engine)
+
+        def test_separate_databases_no_automigrate_gxy_raises_error(self, db_state5_gxy, db_state6_tsi):
+            with pytest.raises(OutdatedDatabaseError):
+                with disposing_engine(db_state5_gxy) as engine1, disposing_engine(db_state6_tsi) as engine2:
+                    _verify_databases(engine1, engine2)
+
+        def test_separate_databases_no_automigrate_tsi_raises_error(self, db_state6_gxy, db_state5_tsi):
+            with pytest.raises(OutdatedDatabaseError):
+                with disposing_engine(db_state6_gxy) as engine1, disposing_engine(db_state5_tsi) as engine2:
+                    _verify_databases(engine1, engine2)
+
+    class TestState6:
+        # Initial state: non-empty database, Alembic version table present, database up-to-date.
+        # Expect: do nothing.
+        def test_combined_database(self, db_state6_combined, metadata_state6_combined):
+            db_url = db_state6_combined
+            with disposing_engine(db_url) as engine:
+                _verify_databases(engine)
+                assert database_is_up_to_date(db_url, metadata_state6_combined, GXY)
+                assert database_is_up_to_date(db_url, metadata_state6_combined, TSI)
+
+        def test_separate_databases(self, db_state6_gxy, db_state6_tsi, metadata_state6_gxy, metadata_state6_tsi):
+            db1_url, db2_url = db_state6_gxy, db_state6_tsi
+            with disposing_engine(db1_url) as engine1, disposing_engine(db2_url) as engine2:
+                _verify_databases(engine1, engine2)
+                assert database_is_up_to_date(db1_url, metadata_state6_gxy, GXY)
+                assert database_is_up_to_date(db2_url, metadata_state6_tsi, TSI)
+
+    class TestPartiallyUpgradedCombinedDatabase:
+        # This only applies to an EXISTING, NONEMPTY database that is COMBINED (both GXY and TSI models
+        # are in the same database).
+        # This covers several edge cases when the GXY model is up-to-date, but the TSI model is not.
+        # This may happen for a variety of reasons:
+        # - a database is being upgraded to Alembic: the GXY model is processed first,
+        #   so GXY will become up-to-date before TSI;
+        # - a database is changed to "combined" (via setting install_database_connection), so TSI has not
+        #   been initialized in this database;
+        # - a glitch in the Matrix, an act of god, etc.
+        # These tests verify the system's handling of such cases for 2 TSI states:
+        # - Database has no TSI tables: assume new TSI install.
+        # - Database has some TSI tables: assume TSI is at the last pre-alembic state and can be migrated.
+        # (We expect a severely outdated TSI model to have been upgraded manually together with the GXY model;
+        # for there is no reasonable way to determine its state automatically without a version table.)
+        #
+        # Initial state for all tests:
+        # - combined database
+        # - GXY model: up-to-date
+        # - TSI model: not in Alembic version table
+        def test_case1_automigrate(self, db_state6_gxy, metadata_state6_combined, set_automigrate):
+            # Initial state:
+            # - no TSI tables exist
+            # - auto-migrate enabled
+            # Expect: database is up-to-date
+            db_url = db_state6_gxy
+            with disposing_engine(db_url) as engine:
+                _verify_databases(engine)
+                assert database_is_up_to_date(db_url, metadata_state6_combined, GXY)
+                assert database_is_up_to_date(db_url, metadata_state6_combined, TSI)
+
+        def test_case1_no_automigrate(self, db_state6_gxy, metadata_state6_combined):
+            # Initial state:
+            # - no TSI tables exist
+            # - auto-migrate not enabled
+            # Expect: database is up-to-date (same as auto-migrate enabled)
+            db_url = db_state6_gxy
+            with disposing_engine(db_url) as engine:
+                _verify_databases(engine)
+                assert database_is_up_to_date(db_url, metadata_state6_combined, GXY)
+                assert database_is_up_to_date(db_url, metadata_state6_combined, TSI)
+
+        def test_case2_automigrate(self, db_state6_gxy_state3_tsi_no_sam, metadata_state6_combined, set_automigrate):
+            # Initial state:
+            # - all pre-alembic TSI tables exist
+            # - auto-migrate enabled
+            # Expect: database is up-to-date
+            db_url = db_state6_gxy_state3_tsi_no_sam
+            with disposing_engine(db_url) as engine:
+                _verify_databases(engine)
+                assert database_is_up_to_date(db_url, metadata_state6_combined, GXY)
+                assert database_is_up_to_date(db_url, metadata_state6_combined, TSI)
+
+        def test_case2_no_automigrate(self, db_state6_gxy_state3_tsi_no_sam):
+            # Initial state:
+            # - all pre-alembic TSI tables exist
+            # - auto-migrate not enabled
+            # Expect: fail with appropriate message
+            db_url = db_state6_gxy_state3_tsi_no_sam
+            with pytest.raises(OutdatedDatabaseError):
+                with disposing_engine(db_url) as engine:
+                    _verify_databases(engine)
+
+
+# Test helpers + their tests, misc. fixtures
+
+
+@pytest.fixture(autouse=True)  # always override _create_additional_database_objects
+def set_create_additional(monkeypatch):
+    monkeypatch.setattr(DatabaseStateVerifier, "_create_additional_database_objects", lambda *_: None)
+
+
+@pytest.fixture
+def set_automigrate(monkeypatch):
+    monkeypatch.setattr(DatabaseStateVerifier, "is_auto_migrate", True)
+
+
+@pytest.fixture(autouse=True)  # always override AlembicManager
+def set_alembic_manager(monkeypatch):
+    monkeypatch.setattr(migrations, "get_alembic_manager", lambda engine: AlembicManagerForTests(engine))
+
+
+@pytest.fixture(autouse=True)  # always override gxy_metadata
+def set_gxy_metadata(monkeypatch, metadata_state6_gxy):
+    monkeypatch.setattr(migrations, "get_gxy_metadata", lambda: metadata_state6_gxy)
+
+
+@pytest.fixture(autouse=True)  # always override tsi_metadata
+def set_tsi_metadata(monkeypatch, metadata_state6_tsi):
+    monkeypatch.setattr(migrations, "get_tsi_metadata", lambda: metadata_state6_tsi)
+
+
+class AlembicManagerForTests(AlembicManager):
+    def __init__(self, engine):
+        path1, path2 = _get_paths_to_version_locations()
+        config_dict = {"version_locations": f"{path1};{path2}"}
+        super().__init__(engine, config_dict)
+
+
+def _get_paths_to_version_locations():
+    # One does not simply use a relative path for both tests and package tests.
+    basepath = os.path.abspath(os.path.dirname(__file__))
+    basepath = os.path.join(basepath, "versions")
+    path1 = os.path.join(basepath, "db1")
+    path2 = os.path.join(basepath, "db2")
+    return path1, path2
+
+
+def load_sqlalchemymigrate_version(db_url, version):
+    with disposing_engine(db_url) as engine:
+        with engine.connect() as conn:
+            sql_delete = f"delete from {SQLALCHEMYMIGRATE_TABLE}"  # there can be only 1 row
+            sql_insert = f"insert into {SQLALCHEMYMIGRATE_TABLE} values('_', '_', {version})"
+            conn.execute(sql_delete)
+            conn.execute(sql_insert)
+
+
+def test_load_sqlalchemymigrate_version(url_factory, metadata_state2_gxy):  # noqa F811
+    db_url = url_factory()
+    with create_and_drop_database(db_url):
+        with disposing_engine(db_url) as engine:
+            load_metadata(metadata_state2_gxy, engine)
+            sql = f"select version from {SQLALCHEMYMIGRATE_TABLE}"
+            version = 42
+            with engine.connect() as conn:
+                result = conn.execute(sql).scalar()
+                assert result != version
+                load_sqlalchemymigrate_version(db_url, version)
+                result = conn.execute(sql).scalar()
+                assert result == version
+
+
+def test_get_last_sqlalchemymigrate_version():
+    assert get_last_sqlalchemymigrate_version(GXY) == SQLALCHEMYMIGRATE_LAST_VERSION_GXY
+    assert get_last_sqlalchemymigrate_version(TSI) == SQLALCHEMYMIGRATE_LAST_VERSION_TSI
+
+
+def database_is_empty(db_url):
+    with disposing_engine(db_url) as engine:
+        with engine.connect() as conn:
+            metadata = MetaData()
+            metadata.reflect(bind=conn)
+            return not bool(metadata.tables)
+
+
+def test_database_is_empty(url_factory, metadata_state1_gxy):  # noqa F811
+    db_url = url_factory()
+    with create_and_drop_database(db_url):
+        with disposing_engine(db_url) as engine:
+            assert database_is_empty(db_url)
+            load_metadata(metadata_state1_gxy, engine)
+            assert not database_is_empty(db_url)
+
+
+def database_is_up_to_date(db_url, current_state_metadata, model):
+    # True if the database at `db_url` has the `current_state_metadata` loaded,
+    # and is up-to-date with respect to `model` (has the most recent Alembic revision).
+
+    # NOTE: Ideally, we'd determine the current metadata based on the model. However, since
+    # metadata is a fixture, it cannot be called directly, and instead has to be
+    # passed as an argument. That's why we ensure that the passed metadata is current
+    # (this guards against an incorrect test).
+    if model == GXY:
+        current_tables = {"gxy_table1", "gxy_table2", "gxy_table3"}
+    elif model == TSI:
+        current_tables = {"tsi_table1", "tsi_table2", "tsi_table3"}
+    is_metadata_current = current_tables <= set(current_state_metadata.tables)
+
+    with disposing_engine(db_url) as engine:
+        is_loaded = is_metadata_loaded(db_url, current_state_metadata)
+        am = AlembicManagerForTests(engine)
+        return is_metadata_current and is_loaded and am.is_up_to_date(model)
+
+
+def test_database_is_up_to_date(url_factory, metadata_state6_gxy):  # noqa F811
+    db_url, metadata = url_factory(), metadata_state6_gxy
+    with create_and_drop_database(db_url):
+        with disposing_engine(db_url) as engine:
+            assert not database_is_up_to_date(db_url, metadata, GXY)
+            load_metadata(metadata, engine)
+            am = AlembicManagerForTests(engine)
+            am.stamp_revision("heads")
+            assert database_is_up_to_date(db_url, metadata, GXY)
+
+
+def test_database_is_up_to_date_for_passed_model_only(url_factory, metadata_state6_gxy):  # noqa F811
+    db_url, metadata = url_factory(), metadata_state6_gxy
+    with create_and_drop_database(db_url):
+        with disposing_engine(db_url) as engine:
+            assert not database_is_up_to_date(db_url, metadata, GXY)
+            assert not database_is_up_to_date(db_url, metadata, TSI)
+            load_metadata(metadata, engine)
+            am = AlembicManagerForTests(engine)
+            am.stamp_revision("heads")
+            assert database_is_up_to_date(db_url, metadata, GXY)
+            assert not database_is_up_to_date(db_url, metadata, TSI)
+
+
+def test_database_is_not_up_to_date_if_noncurrent_metadata_passed(url_factory, metadata_state5_gxy):  # noqa F811
+    db_url, metadata = url_factory(), metadata_state5_gxy
+    with create_and_drop_database(db_url):
+        with disposing_engine(db_url) as engine:
+            load_metadata(metadata, engine)
+            am = AlembicManagerForTests(engine)
+            am.stamp_revision("heads")
+            assert not database_is_up_to_date(db_url, metadata, GXY)
+
+
+def test_database_is_not_up_to_date_if_metadata_not_loaded(url_factory, metadata_state6_gxy):  # noqa F811
+    db_url, metadata = url_factory(), metadata_state6_gxy
+    with create_and_drop_database(db_url):
+        with disposing_engine(db_url) as engine:
+            am = AlembicManagerForTests(engine)
+            am.stamp_revision("heads")
+            assert not database_is_up_to_date(db_url, metadata, GXY)
+
+
+def test_database_is_not_up_to_date_if_alembic_not_added(url_factory, metadata_state6_gxy):  # noqa F811
+    db_url, metadata = url_factory(), metadata_state6_gxy
+    with create_and_drop_database(db_url):
+        with disposing_engine(db_url) as engine:
+            load_metadata(metadata, engine)
+            assert not database_is_up_to_date(db_url, metadata, GXY)
+
+
+def is_metadata_loaded(db_url, metadata):
+    # True if the set of tables in `metadata` (one MetaData object, or a list of them)
+    # is a subset of the tables reflected from `db_url`.
+    with disposing_engine(db_url) as engine:
+        with engine.connect() as conn:
+            db_metadata = MetaData()
+            db_metadata.reflect(bind=conn)
+            tables = _get_tablenames(metadata)
+            return set(tables) <= set(db_metadata.tables)
+
+
+def _get_tablenames(metadata):
+    metadata = listify(metadata)
+    tables = set()
+    for md in metadata:
+        tables |= set(md.tables)
+    return tables
+
+
+def test_is_metadata_loaded(url_factory, metadata_state1_gxy):  # noqa F811
+    db_url, metadata = url_factory(), metadata_state1_gxy
+    with create_and_drop_database(db_url):
+        assert not is_metadata_loaded(db_url, metadata)
+        with disposing_engine(db_url) as engine:
+            with engine.connect() as conn:
+                metadata.create_all(bind=conn)
+        assert is_metadata_loaded(db_url, metadata)
+
+
+def test_is_multiple_metadata_loaded(url_factory, metadata_state1_gxy, metadata_state1_tsi):  # noqa F811
+    db_url = url_factory()
+    metadata = [metadata_state1_gxy, metadata_state1_tsi]
+    with create_and_drop_database(db_url):
+        assert not is_metadata_loaded(db_url, metadata)
+        with disposing_engine(db_url) as engine:
+            with engine.connect() as conn:
+                metadata_state1_gxy.create_all(bind=conn)
+                metadata_state1_tsi.create_all(bind=conn)
+        assert is_metadata_loaded(db_url, metadata)
+
+
+def test_load_metadata(url_factory, metadata_state1_gxy):  # noqa F811
+    db_url, metadata = url_factory(), metadata_state1_gxy
+    with create_and_drop_database(db_url):
+        with disposing_engine(db_url) as engine:
+            assert not is_metadata_loaded(db_url, metadata)
+            load_metadata(metadata, engine)
+            assert is_metadata_loaded(db_url, metadata)
+
+
+# Fixtures: databases loaded with a given state.
+#
+# Each state has 3 versions: gxy, tsi, combined (see fixtures/schemas.py)
+#
+# Each fixture is constructed as follows:
+# 1. Create a new database url (sqlite by default).
+# 2. Pass the database url and the state's metadata fixture to a `_setup_db_state{#}` function.
+# 3. Inside the function, create the database and load any state-specific data.
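+
+
+# Illustrative sketch only (hypothetical, not used by the tests): the general
+# pattern each `_setup_db_state{#}` function below follows. The real functions
+# add state-specific steps, such as loading a SQLAlchemy Migrate version or
+# stamping an Alembic revision.
+def _setup_db_state_sketch(db_url, metadata):
+    with create_and_drop_database(db_url):  # create the database now, drop it on exit
+        with disposing_engine(db_url) as engine:  # dispose of the engine on exit
+            load_metadata(metadata, engine)  # create the state's tables
+            yield db_url  # hand the url to the consuming fixture/test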
+ + +# state 1 +@pytest.fixture +def db_state1_gxy(url_factory, metadata_state1_gxy): # noqa F811 + yield from _setup_db_state1(url_factory(), metadata_state1_gxy) + + +@pytest.fixture +def db_state1_tsi(url_factory, metadata_state1_tsi): # noqa F811 + yield from _setup_db_state1(url_factory(), metadata_state1_tsi) + + +@pytest.fixture +def db_state1_combined(url_factory, metadata_state1_combined): # noqa F811 + yield from _setup_db_state1(url_factory(), metadata_state1_combined) + + +def _setup_db_state1(db_url, metadata): + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + load_metadata(metadata, engine) + yield db_url + + +# state 2 +@pytest.fixture +def db_state2_gxy(url_factory, metadata_state2_gxy): # noqa F811 + yield from _setup_db_state2(url_factory(), metadata_state2_gxy) + + +@pytest.fixture +def db_state2_tsi(url_factory, metadata_state2_tsi): # noqa F811 + yield from _setup_db_state2(url_factory(), metadata_state2_tsi) + + +@pytest.fixture +def db_state2_combined(url_factory, metadata_state2_combined): # noqa F811 + yield from _setup_db_state2(url_factory(), metadata_state2_combined) + + +def _setup_db_state2(db_url, metadata): + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + load_metadata(metadata, engine) + yield db_url + + +# state 3 +@pytest.fixture +def db_state3_gxy(url_factory, metadata_state3_gxy): # noqa F811 + yield from _setup_db_state3(url_factory(), metadata_state3_gxy, SQLALCHEMYMIGRATE_LAST_VERSION_GXY) + + +@pytest.fixture +def db_state3_tsi(url_factory, metadata_state3_tsi): # noqa F811 + yield from _setup_db_state3(url_factory(), metadata_state3_tsi, SQLALCHEMYMIGRATE_LAST_VERSION_TSI) + + +@pytest.fixture +def db_state3_combined(url_factory, metadata_state3_combined): # noqa F811 + # the SQLAlchemy Migrate version in a combined database is GXY (we only stored the TSI version + # if the database was separate). + yield from _setup_db_state3(url_factory(), metadata_state3_combined, SQLALCHEMYMIGRATE_LAST_VERSION_GXY) + + +def _setup_db_state3(db_url, metadata, last_version): + with create_and_drop_database(db_url): + with disposing_engine(db_url) as engine: + load_metadata(metadata, engine) + load_sqlalchemymigrate_version(db_url, last_version) + yield db_url + + +# state 4 +@pytest.fixture +def db_state4_gxy(url_factory, metadata_state4_gxy): # noqa F811 + yield from _setup_db_state4(url_factory(), metadata_state4_gxy, SQLALCHEMYMIGRATE_LAST_VERSION_GXY, GXY) + + +@pytest.fixture +def db_state4_tsi(url_factory, metadata_state4_tsi): # noqa F811 + yield from _setup_db_state4(url_factory(), metadata_state4_tsi, SQLALCHEMYMIGRATE_LAST_VERSION_TSI, TSI) + + +@pytest.fixture +def db_state4_combined(url_factory, metadata_state4_combined): # noqa F811 + # the SQLAlchemy Migrate version in a combined database is GXY (we only stored the TSI version + # if the database was separate). 
+    yield from _setup_db_state4(url_factory(), metadata_state4_combined, SQLALCHEMYMIGRATE_LAST_VERSION_GXY)
+
+
+def _setup_db_state4(db_url, metadata, last_version, model=None):
+    with create_and_drop_database(db_url):
+        with disposing_engine(db_url) as engine:
+            load_metadata(metadata, engine)
+            load_sqlalchemymigrate_version(db_url, last_version)
+
+            revisions: Union[str, list]
+            if model == GXY:
+                revisions = GXY_REVISION_0
+            elif model == TSI:
+                revisions = TSI_REVISION_0
+            else:
+                revisions = [GXY_REVISION_0, TSI_REVISION_0]
+            am = AlembicManagerForTests(engine)
+            am.stamp_revision(revisions)
+
+            yield db_url
+
+
+# state 5
+@pytest.fixture
+def db_state5_gxy(url_factory, metadata_state5_gxy):  # noqa F811
+    yield from _setup_db_state5(url_factory(), metadata_state5_gxy, GXY)
+
+
+@pytest.fixture
+def db_state5_tsi(url_factory, metadata_state5_tsi):  # noqa F811
+    yield from _setup_db_state5(url_factory(), metadata_state5_tsi, TSI)
+
+
+@pytest.fixture
+def db_state5_combined(url_factory, metadata_state5_combined):  # noqa F811
+    yield from _setup_db_state5(url_factory(), metadata_state5_combined)
+
+
+def _setup_db_state5(db_url, metadata, model=None):
+    with create_and_drop_database(db_url):
+        with disposing_engine(db_url) as engine:
+            load_metadata(metadata, engine)
+
+            revisions: Union[str, list]
+            if model == GXY:
+                revisions = GXY_REVISION_1
+            elif model == TSI:
+                revisions = TSI_REVISION_1
+            else:
+                revisions = [GXY_REVISION_1, TSI_REVISION_1]
+            am = AlembicManagerForTests(engine)
+            am.stamp_revision(revisions)
+
+            yield db_url
+
+
+# state 6
+@pytest.fixture
+def db_state6_gxy(url_factory, metadata_state6_gxy):  # noqa F811
+    yield from _setup_db_state6(url_factory(), metadata_state6_gxy, GXY)
+
+
+@pytest.fixture
+def db_state6_tsi(url_factory, metadata_state6_tsi):  # noqa F811
+    yield from _setup_db_state6(url_factory(), metadata_state6_tsi, TSI)
+
+
+@pytest.fixture
+def db_state6_combined(url_factory, metadata_state6_combined):  # noqa F811
+    yield from _setup_db_state6(url_factory(), metadata_state6_combined)
+
+
+def _setup_db_state6(db_url, metadata, model=None):
+    with create_and_drop_database(db_url):
+        with disposing_engine(db_url) as engine:
+            load_metadata(metadata, engine)
+
+            revisions: Union[str, list]
+            if model == GXY:
+                revisions = GXY_REVISION_2
+            elif model == TSI:
+                revisions = TSI_REVISION_2
+            else:
+                revisions = [GXY_REVISION_2, TSI_REVISION_2]
+            am = AlembicManagerForTests(engine)
+            am.stamp_revision(revisions)
+
+            yield db_url
+
+
+# state 6+3
+@pytest.fixture
+def db_state6_gxy_state3_tsi_no_sam(url_factory, metadata_state6_gxy_state3_tsi_no_sam):  # noqa F811
+    db_url = url_factory()
+    metadata = metadata_state6_gxy_state3_tsi_no_sam
+    with create_and_drop_database(db_url):
+        with disposing_engine(db_url) as engine:
+            load_metadata(metadata, engine)
+            am = AlembicManagerForTests(engine)
+            am.stamp_revision(GXY_REVISION_2)
+            yield db_url
+
+
+# Test LegacyManageDb (used by scripts/manage_db.py)
+
+
+@pytest.fixture(autouse=True)
+def legacy_manage_db(monkeypatch):
+    def get_alembic_cfg(self):
+        path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir, os.pardir)
+        path = os.path.normpath(path)
+        # Adjust path when running from packages
+        if os.path.split(path)[1] == "packages":
+            path = os.path.join(path, os.pardir)
+        path = os.path.join(path, "lib", "galaxy", "model", "migrations", "alembic.ini")
+
+        config = Config(path)
+        path1, path2 = _get_paths_to_version_locations()
+        config.set_main_option("version_locations", f"{path1};{path2}")
+        return config
+
+    monkeypatch.setattr(LegacyManageDb, "_get_alembic_cfg", get_alembic_cfg)
+
+
+@pytest.fixture(autouse=True)
+def set_db_urls(monkeypatch):
+    # Do not try to access galaxy config; values not needed.
+    def no_config_call(self):
+        self.gxy_db_url = "a string"
+        self.tsi_db_url = "a string"
+
+    monkeypatch.setattr(LegacyManageDb, "_set_db_urls", no_config_call)
+
+
+@pytest.fixture(autouse=True)  # always override AlembicManager
+def set_alembic_manager2(monkeypatch):
+    monkeypatch.setattr(scripts, "get_alembic_manager", lambda engine: AlembicManagerForTests(engine))
+
+
+class TestLegacyManageDbScript:
+    def test_get_gxy_version(self):
+        mdb = LegacyManageDb()
+        version = mdb.get_gxy_version()
+        assert version == GXY_REVISION_2
+
+    class TestState2:
+        # Initial state: non-empty database, SQLAlchemy Migrate version table present; however,
+        # the stored version is not the latest after which we could transition to Alembic.
+        def test_get_gxy_db_version__state2__gxy_database(self, db_state2_gxy):
+            # Expect: fail
+            db_url = db_state2_gxy
+            with pytest.raises(IncorrectVersionError):
+                mdb = LegacyManageDb()
+                mdb.get_gxy_db_version(db_url)
+
+        def test_get_gxy_db_version__state2__combined_database(self, db_state2_combined):
+            # Expect: fail
+            db_url = db_state2_combined
+            with pytest.raises(IncorrectVersionError):
+                mdb = LegacyManageDb()
+                mdb.get_gxy_db_version(db_url)
+
+    class TestState3:
+        # Initial state: non-empty database, SQLAlchemy Migrate version table contains latest version
+        # under SQLAlchemy Migrate.
+        def test_get_gxy_db_version__state3__gxy_database(self, db_state3_gxy):
+            # Expect: return SQLALCHEMYMIGRATE_LAST_VERSION_GXY
+            db_url = db_state3_gxy
+            mdb = LegacyManageDb()
+            version = mdb.get_gxy_db_version(db_url)
+            assert version == SQLALCHEMYMIGRATE_LAST_VERSION_GXY
+
+        def test_get_gxy_db_version__state3__combined_database(self, db_state3_combined):
+            # Expect: return SQLALCHEMYMIGRATE_LAST_VERSION_GXY
+            db_url = db_state3_combined
+            mdb = LegacyManageDb()
+            version = mdb.get_gxy_db_version(db_url)
+            assert version == SQLALCHEMYMIGRATE_LAST_VERSION_GXY
+
+        def test_upgrade__state3__combined_database(self, db_state3_combined, metadata_state6_combined):
+            # Expect: upgrade to current version
+            db_url = db_state3_combined
+            mdb = LegacyManageDb()
+            mdb.run_upgrade(db_url, db_url)
+            assert database_is_up_to_date(db_url, metadata_state6_combined, GXY)
+            assert database_is_up_to_date(db_url, metadata_state6_combined, TSI)
+
+        def test_upgrade__state3__separate_databases(
+            self, db_state3_gxy, db_state3_tsi, metadata_state6_gxy, metadata_state6_tsi
+        ):
+            # Expect: upgrade to current version
+            db1_url, db2_url = db_state3_gxy, db_state3_tsi
+            mdb = LegacyManageDb()
+            mdb.run_upgrade(db1_url, db2_url)
+            assert database_is_up_to_date(db1_url, metadata_state6_gxy, GXY)
+            assert database_is_up_to_date(db2_url, metadata_state6_tsi, TSI)
+
+    class TestState4:
+        # Initial state: non-empty database, SQLAlchemy Migrate version table present, Alembic version table present.
+        # Oldest Alembic revision.
+        # Expect: return GXY_REVISION_0
+        def test_get_gxy_db_version__state4__gxy_database(self, db_state4_gxy):
+            db_url = db_state4_gxy
+            mdb = LegacyManageDb()
+            version = mdb.get_gxy_db_version(db_url)
+            assert version == GXY_REVISION_0
+
+        def test_get_gxy_db_version__state4__combined_database(self, db_state4_combined):
+            db_url = db_state4_combined
+            mdb = LegacyManageDb()
+            version = mdb.get_gxy_db_version(db_url)
+            assert version == GXY_REVISION_0
+
+        def test_upgrade__state4__combined_database(self, db_state4_combined, metadata_state6_combined):
+            # Expect: upgrade to current version
+            db_url = db_state4_combined
+            mdb = LegacyManageDb()
+            mdb.run_upgrade(db_url, db_url)
+            assert database_is_up_to_date(db_url, metadata_state6_combined, GXY)
+            assert database_is_up_to_date(db_url, metadata_state6_combined, TSI)
+
+        def test_upgrade__state4__separate_databases(
+            self, db_state4_gxy, db_state4_tsi, metadata_state6_gxy, metadata_state6_tsi
+        ):
+            # Expect: upgrade to current version
+            db1_url, db2_url = db_state4_gxy, db_state4_tsi
+            mdb = LegacyManageDb()
+            mdb.run_upgrade(db1_url, db2_url)
+            assert database_is_up_to_date(db1_url, metadata_state6_gxy, GXY)
+            assert database_is_up_to_date(db2_url, metadata_state6_tsi, TSI)
diff --git a/test/unit/data/model/migrations/test_scripts.py b/test/unit/data/model/migrations/test_scripts.py
new file mode 100644
index 000000000000..216bc6a5efe0
--- /dev/null
+++ b/test/unit/data/model/migrations/test_scripts.py
@@ -0,0 +1,147 @@
+import pytest
+
+from galaxy.model.migrations.scripts import (
+    LegacyScripts,
+    LegacyScriptsException,
+)
+
+
+@pytest.fixture(autouse=True)
+def set_db_urls(monkeypatch):
+    # Do not try to access galaxy config; values not needed.
+    def no_config_call(self):
+        self.gxy_url = "a string"
+        self.tsi_url = "a string"
+
+    monkeypatch.setattr(LegacyScripts, "load_db_urls", no_config_call)
+
+
+@pytest.fixture(autouse=True)  # set combined db for all tests
+def set_combined(monkeypatch):
+    monkeypatch.setattr(LegacyScripts, "_is_one_database", lambda self: True)
+
+
+@pytest.fixture
+def set_separate(monkeypatch):
+    monkeypatch.setattr(LegacyScripts, "_is_one_database", lambda self: False)
+
+
+class TestLegacyScripts:
+    @pytest.mark.parametrize("database_arg", ["galaxy", "install"])
+    def test_pop_database_name(self, database_arg):
+        argv = ["caller", "--alembic-config", "path-to-alembic", "upgrade", "--version=abc", database_arg]
+        ls = LegacyScripts(argv)
+        ls.pop_database_argument()
+        assert ls.database == database_arg
+        assert argv == ["caller", "--alembic-config", "path-to-alembic", "upgrade", "--version=abc"]
+
+    def test_pop_database_name_use_default(self):
+        argv = ["caller", "--alembic-config", "path-to-alembic", "upgrade", "--version=abc"]
+        ls = LegacyScripts(argv)
+        ls.pop_database_argument()
+        assert ls.database == LegacyScripts.DEFAULT_DB_ARG
+        assert argv == ["caller", "--alembic-config", "path-to-alembic", "upgrade", "--version=abc"]
+
+    @pytest.mark.parametrize("arg_name", LegacyScripts.LEGACY_CONFIG_FILE_ARG_NAMES)
+    def test_rename_config_arg(self, arg_name):
+        # `-c|--config|--config-file` should be renamed to `--galaxy-config`
+        argv = ["caller", "--alembic-config", "path-to-alembic", arg_name, "path-to-galaxy", "upgrade", "--version=abc"]
+        LegacyScripts(argv).rename_config_argument()
+        assert argv == [
+            "caller",
+            "--alembic-config",
+            "path-to-alembic",
+            "--galaxy-config",
+            "path-to-galaxy",
+            "upgrade",
+            "--version=abc",
+        ]
+
+    def test_rename_config_arg_reordered_args(self):
+        # `-c|--config|--config-file` should be renamed to `--galaxy-config`
+        argv = ["caller", "--alembic-config", "path-to-alembic", "upgrade", "--version=abc", "-c", "path-to-galaxy"]
+        LegacyScripts(argv).rename_config_argument()
+        assert argv == [
+            "caller",
+            "--alembic-config",
+            "path-to-alembic",
+            "upgrade",
+            "--version=abc",
+            "--galaxy-config",
+            "path-to-galaxy",
+        ]
+
+    def test_rename_alembic_config_arg(self):
+        # `--alembic-config` should be renamed to `-c`
+        argv = ["caller", "--alembic-config", "path-to-alembic", "upgrade", "--version=abc"]
+        LegacyScripts(argv).rename_alembic_config_argument()
+        assert argv == ["caller", "-c", "path-to-alembic", "upgrade", "--version=abc"]
+
+    def test_rename_alembic_config_arg_raises_error_if_c_arg_present(self):
+        # Ensure the alembic config arg is renamed AFTER renaming the galaxy config arg. Raise an error otherwise.
+        argv = ["caller", "--alembic-config", "path-to-alembic", "-c", "path-to-galaxy", "upgrade", "--version=abc"]
+        with pytest.raises(LegacyScriptsException):
+            LegacyScripts(argv).rename_alembic_config_argument()
+
+    def test_convert__version_arg_1(self):
+        # `sh manage_db.sh upgrade --version X` >> `... upgrade X`
+        argv = ["caller", "--alembic-config", "path-to-alembic", "upgrade", "--version", "abc"]
+        LegacyScripts(argv).convert_args()
+        assert argv == ["caller", "-c", "path-to-alembic", "upgrade", "abc"]
+
+    def test_convert__version_arg_2(self):
+        # `sh manage_db.sh upgrade --version=X` >> `... upgrade X`
+        argv = ["caller", "--alembic-config", "path-to-alembic", "upgrade", "--version=abc"]
+        LegacyScripts(argv).convert_args()
+        assert argv == ["caller", "-c", "path-to-alembic", "upgrade", "abc"]
+
+    def test_convert__no_version_no_model_combined_database(self):
+        # `sh manage_db.sh upgrade` >> `... upgrade heads`
+        # No version and no model implies "upgrade the default db (which is galaxy) to its latest version".
+        # If it is combined, we upgrade both models: gxy and tsi.
+        argv = ["caller", "--alembic-config", "path-to-alembic", "upgrade"]
+        LegacyScripts(argv).convert_args()
+        assert argv == ["caller", "-c", "path-to-alembic", "upgrade", "heads"]
+
+    def test_convert__no_version_galaxy_model_combined_database(self):
+        # `sh manage_db.sh upgrade galaxy` >> `... upgrade heads`
+        # same as no model: if combined, we upgrade the whole database
+        argv = ["caller", "--alembic-config", "path-to-alembic", "upgrade", "galaxy"]
+        LegacyScripts(argv).convert_args()
+        assert argv == ["caller", "-c", "path-to-alembic", "upgrade", "heads"]
+
+    def test_convert__no_version_install_model_combined_database(self):
+        # `sh manage_db.sh upgrade install` >> `... upgrade heads`
+        # same as no model: if combined, we upgrade the whole database
+        argv = ["caller", "--alembic-config", "path-to-alembic", "upgrade", "install"]
+        LegacyScripts(argv).convert_args()
+        assert argv == ["caller", "-c", "path-to-alembic", "upgrade", "heads"]
+
+    def test_convert__no_version_no_model_separate_databases(self, set_separate):
+        # `sh manage_db.sh upgrade` >> `... upgrade gxy@head`
+        # No version and no model implies "upgrade the default db (which is galaxy) to its latest version".
+        # Since the tsi model has its own db, we only upgrade the gxy model.
+ argv = ["caller", "--alembic-config", "path-to-alembic", "upgrade"] + LegacyScripts(argv).convert_args() + assert argv == ["caller", "-c", "path-to-alembic", "upgrade", "gxy@head"] + + def test_convert__no_version_galaxy_model_separate_databases(self, set_separate): + # `sh manage_db.sh upgrade galaxy` >> `... upgrade gxy@head` + # No version + a model implies "upgrade the db for the specified model to its latest version". + argv = ["caller", "--alembic-config", "path-to-alembic", "upgrade", "galaxy"] + LegacyScripts(argv).convert_args() + assert argv == ["caller", "-c", "path-to-alembic", "upgrade", "gxy@head"] + + def test_convert__no_version_install_model_separate_databases(self, set_separate): + # `sh manage_db.sh upgrade install` >> `... upgrade tsi@head` + # No version + a model implies "upgrade the db for the specified model to its latest version". + argv = ["caller", "--alembic-config", "path-to-alembic", "upgrade", "install"] + LegacyScripts(argv).convert_args() + assert argv == ["caller", "-c", "path-to-alembic", "upgrade", "tsi@head"] + + def test_downgrade_with_no_version_argument_raises_error(self): + argv = ["caller", "--alembic-config", "path-to-alembic", "downgrade"] + with pytest.raises(LegacyScriptsException): + LegacyScripts(argv).convert_args() diff --git a/test/unit/data/model/migrations/versions/db1/2e8a580bc79a_drop_sqlachemymigrate_table.py b/test/unit/data/model/migrations/versions/db1/2e8a580bc79a_drop_sqlachemymigrate_table.py new file mode 100644 index 000000000000..d80b518976b8 --- /dev/null +++ b/test/unit/data/model/migrations/versions/db1/2e8a580bc79a_drop_sqlachemymigrate_table.py @@ -0,0 +1,34 @@ +"""drop sqlachemymigrate table + +Revision ID: 2e8a580bc79a +Revises: 62695fac6cc0 +Create Date: 2021-11-05 16:29:19.123118 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "2e8a580bc79a" +down_revision = "62695fac6cc0" +branch_labels = None +depends_on = None + + +def upgrade(): + # This table exists in both schemas: gxy and tsi. With a combined database, + # this migration will be applied twice to the same database, so we ignore + # the error that happens on the second run when the table has been dropped. + try: + op.drop_table("migrate_version", must_exist=True) + except sa.exc.InvalidRequestError: + pass + + +def downgrade(): + op.create_table( + "migrate_version", + sa.Column("repository_id", sa.String(250), primary_key=True), + sa.Column("repository_path", sa.Text), + sa.Column("version", sa.Integer), + ) diff --git a/test/unit/data/model/migrations/versions/db1/62695fac6cc0_create_gxy_test_branch.py b/test/unit/data/model/migrations/versions/db1/62695fac6cc0_create_gxy_test_branch.py new file mode 100644 index 000000000000..afc3204a3095 --- /dev/null +++ b/test/unit/data/model/migrations/versions/db1/62695fac6cc0_create_gxy_test_branch.py @@ -0,0 +1,22 @@ +"""create gxy test branch + +Revision ID: 62695fac6cc0 +Revises: +Create Date: 2021-11-05 16:28:30.497050 + +""" + + +# revision identifiers, used by Alembic. 
+revision = "62695fac6cc0" +down_revision = None +branch_labels = ("gxy",) +depends_on = None + + +def upgrade(): + pass + + +def downgrade(): + pass diff --git a/test/unit/data/model/migrations/versions/db1/e02cef55763c_add_gxy_table3.py b/test/unit/data/model/migrations/versions/db1/e02cef55763c_add_gxy_table3.py new file mode 100644 index 000000000000..37f9a596f95f --- /dev/null +++ b/test/unit/data/model/migrations/versions/db1/e02cef55763c_add_gxy_table3.py @@ -0,0 +1,26 @@ +"""add foo3 table + +Revision ID: e02cef55763c +Revises: 2e8a580bc79a +Create Date: 2021-11-05 16:30:30.521436 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "e02cef55763c" +down_revision = "2e8a580bc79a" +branch_labels = None +depends_on = None + + +def upgrade(): + op.create_table( + "gxy_table3", + sa.Column("id", sa.Integer, primary_key=True), + ) + + +def downgrade(): + op.drop_table("gxy_table3") diff --git a/test/unit/data/model/migrations/versions/db2/0e28bf2fb7b5_add_tsi_table3.py b/test/unit/data/model/migrations/versions/db2/0e28bf2fb7b5_add_tsi_table3.py new file mode 100644 index 000000000000..d692c9f4937b --- /dev/null +++ b/test/unit/data/model/migrations/versions/db2/0e28bf2fb7b5_add_tsi_table3.py @@ -0,0 +1,26 @@ +"""add bar3 table + +Revision ID: 0e28bf2fb7b5 +Revises: 8364ef1cab05 +Create Date: 2021-11-05 16:31:00.530235 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "0e28bf2fb7b5" +down_revision = "8364ef1cab05" +branch_labels = None +depends_on = None + + +def upgrade(): + op.create_table( + "tsi_table3", + sa.Column("id", sa.Integer, primary_key=True), + ) + + +def downgrade(): + op.drop_table("tsi_table3") diff --git a/test/unit/data/model/migrations/versions/db2/1bceec30363a_create_tsi_test_branch.py b/test/unit/data/model/migrations/versions/db2/1bceec30363a_create_tsi_test_branch.py new file mode 100644 index 000000000000..7209e389f8e7 --- /dev/null +++ b/test/unit/data/model/migrations/versions/db2/1bceec30363a_create_tsi_test_branch.py @@ -0,0 +1,22 @@ +"""create tsi test branch + +Revision ID: 1bceec30363a +Revises: +Create Date: 2021-11-05 16:28:45.450830 + +""" + + +# revision identifiers, used by Alembic. +revision = "1bceec30363a" +down_revision = None +branch_labels = ("tsi",) +depends_on = None + + +def upgrade(): + pass + + +def downgrade(): + pass diff --git a/test/unit/data/model/migrations/versions/db2/8364ef1cab05_drop_sqlachemymigrate_table.py b/test/unit/data/model/migrations/versions/db2/8364ef1cab05_drop_sqlachemymigrate_table.py new file mode 100644 index 000000000000..ac4c9a98879b --- /dev/null +++ b/test/unit/data/model/migrations/versions/db2/8364ef1cab05_drop_sqlachemymigrate_table.py @@ -0,0 +1,34 @@ +"""drop sqlachemymigrate table + +Revision ID: 8364ef1cab05 +Revises: 1bceec30363a +Create Date: 2021-11-05 16:30:51.369967 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "8364ef1cab05" +down_revision = "1bceec30363a" +branch_labels = None +depends_on = None + + +def upgrade(): + # This table exists in both schemas: gxy and tsi. With a combined database, + # this migration will be applied twice to the same database, so we ignore + # the error that happens on the second run when the table has been dropped. 
+ try: + op.drop_table("migrate_version", must_exist=True) + except sa.exc.InvalidRequestError: + pass + + +def downgrade(): + op.create_table( + "migrate_version", + sa.Column("repository_id", sa.String(250), primary_key=True), + sa.Column("repository_path", sa.Text), + sa.Column("version", sa.Integer), + ) diff --git a/tox.ini b/tox.ini index 0122b93703f0..636c8ac72428 100644 --- a/tox.ini +++ b/tox.ini @@ -27,7 +27,6 @@ setenv = unit: GALAXY_VIRTUAL_ENV={envdir} unit: GALAXY_ENABLE_BETA_COMPRESSED_GENBANK_SNIFFING=1 mulled: GALAXY_TEST_INCLUDE_SLOW=1 - check_indexes: GALAXY_TEST_FORCE_DATABASE_MIGRATION=1 check_indexes: GALAXY_SKIP_CLIENT_BUILD=1 deps = lint,lint_docstring,lint_docstring_include_list,mypy: -rlib/galaxy/dependencies/pinned-lint-requirements.txt