Refreshes all tables in Hub DB
 - This patch collects the existing data from the db, deletes
   the existing tables, recreates them, and re-inserts the data,
   so that the db is updated and compatible with `gorm 2.0`
   (the pattern is sketched below).
 - This patch also removes two migrations, since the whole
   database is being refreshed.

Signed-off-by: Puneet Punamiya <ppunamiy@redhat.com>
Signed-off-by: Vinamra Jain <vinjain@redhat.com>
PuneetPunamiya authored and vinamra28 committed Aug 2, 2021
1 parent 49e68f8 commit 51e5b00
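
As a condensed illustration of the refresh pattern described in the commit message (snapshot the rows, drop the table, re-create it with gorm 2.0's AutoMigrate, then put the rows back), here is a minimal sketch reduced to a single hypothetical Example model; the model, migration ID, and helper name are illustrative and are not part of this commit.

package migration

import (
	"github.com/go-gormigrate/gormigrate/v2"
	"gorm.io/gorm"
)

// Example is a stand-in model; the real migration below applies the same
// cycle to every model in the Hub's model package.
type Example struct {
	ID   uint `gorm:"primaryKey"`
	Name string
}

// refreshExampleTable sketches the dump / drop / re-create / re-insert cycle.
func refreshExampleTable() *gormigrate.Migration {
	return &gormigrate.Migration{
		ID: "0000_refresh_example_table",
		Migrate: func(db *gorm.DB) error {
			txn := db.Begin()

			// 1. Snapshot the existing rows.
			var rows []Example
			if err := txn.Find(&rows).Error; err != nil {
				txn.Rollback()
				return err
			}

			// 2. Drop the table and re-create it from the gorm 2.0 model.
			if err := txn.Migrator().DropTable(&Example{}); err != nil {
				txn.Rollback()
				return err
			}
			if err := txn.AutoMigrate(&Example{}); err != nil {
				txn.Rollback()
				return err
			}

			// 3. Re-insert the saved rows.
			if len(rows) > 0 {
				if err := txn.Create(&rows).Error; err != nil {
					txn.Rollback()
					return err
				}
			}
			return txn.Commit().Error
		},
	}
}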
Showing 5 changed files with 263 additions and 91 deletions.

This file was deleted.

38 changes: 0 additions & 38 deletions api/pkg/db/migration/202107131802_remove_existing_categories.go

This file was deleted.

258 changes: 258 additions & 0 deletions api/pkg/db/migration/202107291608_refresh_all_tables.go
@@ -0,0 +1,258 @@
// Copyright © 2021 The Tekton Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package migration

import (
	"github.com/go-gormigrate/gormigrate/v2"
	"github.com/tektoncd/hub/api/gen/log"
	"github.com/tektoncd/hub/api/pkg/db/model"
	"gorm.io/gorm"
)

func refreshAllTables(log *log.Logger) *gormigrate.Migration {
	return &gormigrate.Migration{
		ID: "202107291608_refresh_all_tables",
		Migrate: func(db *gorm.DB) error {
			txn := db.Begin()
			if err := migrateDB(txn, log); err != nil {
				txn.Rollback()
				log.Error(err)
				return err
			}
			return txn.Commit().Error
		},
	}
}

// migrateDB snapshots the rows of every table, drops and re-creates the
// schema, and then restores the saved rows.
func migrateDB(txn *gorm.DB, log *log.Logger) error {
	var user []model.User
	if err := txn.Find(&user).Error; err != nil {
		log.Error(err)
		return err
	}

	var user_scopes []model.UserScope
	if err := txn.Find(&user_scopes).Error; err != nil {
		log.Error(err)
		return err
	}

	var user_rating []model.UserResourceRating
	if err := txn.Find(&user_rating).Error; err != nil {
		log.Error(err)
		return err
	}

	var tags []model.Tag
	if err := txn.Find(&tags).Error; err != nil {
		log.Error(err)
		return err
	}

	var sync_job []model.SyncJob
	if err := txn.Find(&sync_job).Error; err != nil {
		log.Error(err)
		return err
	}

	var scopes []model.Scope
	if err := txn.Find(&scopes).Error; err != nil {
		log.Error(err)
		return err
	}

	var resources []model.Resource
	if err := txn.Find(&resources).Error; err != nil {
		log.Error(err)
		return err
	}

	var resource_versions []model.ResourceVersion
	if err := txn.Find(&resource_versions).Error; err != nil {
		log.Error(err)
		return err
	}

	var resource_tags []model.ResourceTag
	if err := txn.Find(&resource_tags).Error; err != nil {
		log.Error(err)
		return err
	}

	var configs []model.Config
	if err := txn.Find(&configs).Error; err != nil {
		log.Error(err)
		return err
	}

	var catalog []model.Catalog
	if err := txn.Find(&catalog).Error; err != nil {
		log.Error(err)
		return err
	}

	var catalog_error []model.CatalogError
	if err := txn.Find(&catalog_error).Error; err != nil {
		log.Error(err)
		return err
	}

	// Drop all existing tables so they can be re-created with a
	// gorm 2.0 compatible schema.
	if err := txn.Migrator().DropTable(&model.Category{},
		&model.Tag{},
		&model.Catalog{},
		&model.CatalogError{},
		&model.Resource{},
		&model.ResourceVersion{},
		&model.User{},
		&model.UserResourceRating{},
		&model.SyncJob{},
		&model.Scope{},
		&model.UserScope{},
		&model.Config{},
		&model.ResourceTag{},
	); err != nil {
		log.Error(err)
		return err
	}

	// Re-create the tables from the current model definitions. Note that
	// model.Category is dropped above but not re-created here.
	if err := txn.AutoMigrate(
		&model.Tag{},
		&model.Catalog{},
		&model.CatalogError{},
		&model.Resource{},
		&model.ResourceVersion{},
		&model.User{},
		&model.UserResourceRating{},
		&model.SyncJob{},
		&model.Scope{},
		&model.UserScope{},
		&model.Config{},
		&model.ResourceTag{},
	); err != nil {
		log.Error(err)
		return err
	}

	// Restore the saved rows, skipping tables that had no data.
	if len(catalog) > 0 {
		if err := txn.Create(&catalog).Error; err != nil {
			log.Error(err)
			return err
		}
	}

	if len(catalog_error) > 0 {
		if err := txn.Create(&catalog_error).Error; err != nil {
			log.Error(err)
			return err
		}
	}

	if len(resources) > 0 {
		if err := txn.Create(&resources).Error; err != nil {
			log.Error(err)
			return err
		}
	}

	if len(resource_versions) > 0 {
		if err := txn.Create(&resource_versions).Error; err != nil {
			log.Error(err)
			return err
		}
	}

	if len(tags) > 0 {
		if err := txn.Create(&tags).Error; err != nil {
			log.Error(err)
			return err
		}
	}

	if len(user) > 0 {
		if err := txn.Create(&user).Error; err != nil {
			log.Error(err)
			return err
		}
	}

	if len(user_rating) > 0 {
		if err := txn.Create(&user_rating).Error; err != nil {
			log.Error(err)
			return err
		}
	}

	if len(sync_job) > 0 {
		if err := txn.Create(&sync_job).Error; err != nil {
			log.Error(err)
			return err
		}
	}

	if len(scopes) > 0 {
		if err := txn.Create(&scopes).Error; err != nil {
			log.Error(err)
			return err
		}
	}

	if len(user_scopes) > 0 {
		if err := txn.Create(&user_scopes).Error; err != nil {
			log.Error(err)
			return err
		}
	}

	if len(configs) > 0 {
		if err := txn.Create(&configs).Error; err != nil {
			log.Error(err)
			return err
		}
	}

	if len(resource_tags) > 0 {
		if err := txn.Create(&resource_tags).Error; err != nil {
			log.Error(err)
			return err
		}
	}

	return nil
}
3 changes: 1 addition & 2 deletions api/pkg/db/migration/migration.go
@@ -36,9 +36,8 @@ func Migrate(api *app.APIBase) error {
 			addRefreshTokenChecksumColumnInUserTable(log),
 			updateCatalogBranchToMain(log),
 			addAvatarURLColumnInUsersTable(log),
-			removeCatgoryIdColumnAndConstraintsFromTagtable(log),
+			refreshAllTables(log),
 			updateResourcesCategoryTable(log),
-			removeExistingCategories(log),
 		},
 	)

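For context, a migration list like the one registered above is typically executed through gormigrate roughly as follows; the function name and error wrapping are illustrative, and the surrounding setup (obtaining the *gorm.DB, logging) is assumed rather than taken from the Hub's migration.go.

package migration

import (
	"fmt"

	"github.com/go-gormigrate/gormigrate/v2"
	"gorm.io/gorm"
)

// runMigrations applies the registered migrations in order. gormigrate
// records applied IDs in its own bookkeeping table, so a migration such as
// 202107291608_refresh_all_tables runs exactly once per database.
func runMigrations(db *gorm.DB, migrations []*gormigrate.Migration) error {
	m := gormigrate.New(db, gormigrate.DefaultOptions, migrations)
	if err := m.Migrate(); err != nil {
		return fmt.Errorf("migration failed: %w", err)
	}
	return nil
}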
7 changes: 4 additions & 3 deletions api/test/fixtures/sync_jobs.yaml
@@ -15,11 +15,12 @@
 # NOTE:
 # This record is added to make sure the sync_jobs table state remains same
 # for every catalog refresh test. not adding this will fails the test due to the
-# sequence which will be out of sync and have to manually bring it back in sync.
+# sequence which will be out of sync and have to manually bring it back in sync.
+# Error: ERROR: duplicate key value violates unique constraint "sync_jobs_pkey"
 
 - id: 1
-  user_id: 1
+  user_id: 11
   catalog_id: 1
   status: done
   created_at: 2016-01-01 12:30:12 UTC
-  updated_at: 2016-01-01 12:30:12 UTC
+  updated_at: 2016-01-01 12:30:12 UTC
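
The NOTE in this fixture refers to Postgres' id sequence drifting when rows are inserted with explicit ids; as a rough sketch of the manual re-sync it mentions, assuming a Postgres-backed *gorm.DB (the helper name is illustrative, only the table and column names come from the fixture):

package fixtures

import "gorm.io/gorm"

// resyncSyncJobsIDSequence points the sync_jobs id sequence at the current
// maximum id, so the next auto-generated id does not collide with a row
// that was inserted with an explicit id.
func resyncSyncJobsIDSequence(db *gorm.DB) error {
	return db.Exec(
		`SELECT setval(pg_get_serial_sequence('sync_jobs', 'id'),
		               (SELECT COALESCE(MAX(id), 1) FROM sync_jobs))`,
	).Error
}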
