Detect migrating batch size #7353

Merged 4 commits on Jul 6, 2019
6 changes: 6 additions & 0 deletions models/models.go
@@ -368,3 +368,9 @@ func DumpDatabase(filePath string, dbType string) error {
 	}
 	return x.DumpTablesToFile(tbs, filePath)
 }
+
+// MaxBatchInsertSize returns the table's max batch insert size
+func MaxBatchInsertSize(bean interface{}) int {
+	t := x.TableInfo(bean)
+	return 999 / len(t.ColumnsSeq())
+}
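Note on the constant: 999 appears to match SQLite's default cap of 999 bound parameters per SQL statement, and a multi-row INSERT binds one parameter per column for every row, so the number of rows that fit in a single statement is 999 divided by the column count. A rough standalone sketch of that arithmetic, not part of the diff and using a made-up column count:

package main

import "fmt"

func main() {
	// One bound parameter per column per row, capped at 999 per statement,
	// mirrors the 999 / len(t.ColumnsSeq()) expression above.
	const maxParams = 999
	columns := 25 // hypothetical table width
	fmt.Printf("rows per batch: %d\n", maxParams/columns) // prints 39
}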
1 change: 1 addition & 0 deletions modules/migrations/base/uploader.go
@@ -7,6 +7,7 @@ package base
 
 // Uploader uploads all the informations of one repository
 type Uploader interface {
+	MaxBatchInsertSize(tp string) int
 	CreateRepo(repo *Repository, opts MigrateOptions) error
 	CreateMilestones(milestones ...*Milestone) error
 	CreateReleases(releases ...*Release) error
19 changes: 19 additions & 0 deletions modules/migrations/gitea.go
@@ -53,6 +53,25 @@ func NewGiteaLocalUploader(doer *models.User, repoOwner, repoName string) *Gitea
 	}
 }
 
+// MaxBatchInsertSize returns the table's max batch insert size
+func (g *GiteaLocalUploader) MaxBatchInsertSize(tp string) int {
+	switch tp {
+	case "issue":
+		return models.MaxBatchInsertSize(new(models.Issue))
+	case "comment":
+		return models.MaxBatchInsertSize(new(models.Comment))
+	case "milestone":
+		return models.MaxBatchInsertSize(new(models.Milestone))
+	case "label":
+		return models.MaxBatchInsertSize(new(models.Label))
+	case "release":
+		return models.MaxBatchInsertSize(new(models.Release))
+	case "pullrequest":
+		return models.MaxBatchInsertSize(new(models.PullRequest))
+	}
+	return 10
+}
+
 // CreateRepo creates a repository
 func (g *GiteaLocalUploader) CreateRepo(repo *base.Repository, opts base.MigrateOptions) error {
 	owner, err := models.GetUserByName(g.repoOwner)
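A possible sanity check for the switch above, sketched as a test in the same package; the test is not part of the diff, and it assumes Gitea's models test database has been initialized, since MaxBatchInsertSize reads table metadata from the xorm engine:

package migrations

import "testing"

// Hypothetical test, not part of the diff.
func TestGiteaUploaderMaxBatchInsertSize(t *testing.T) {
	uploader := NewGiteaLocalUploader(nil, "owner", "repo")
	for _, tp := range []string{"issue", "comment", "milestone", "label", "release", "pullrequest"} {
		if size := uploader.MaxBatchInsertSize(tp); size <= 0 {
			t.Errorf("expected a positive batch size for %q, got %d", tp, size)
		}
	}
	if size := uploader.MaxBatchInsertSize("unknown"); size != 10 {
		t.Errorf("expected the fallback of 10 for an unknown type, got %d", size)
	}
}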
64 changes: 47 additions & 17 deletions modules/migrations/migrate.go
@@ -91,8 +91,16 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 			return err
 		}
 
-		if err := uploader.CreateMilestones(milestones...); err != nil {
-			return err
+		msBatchSize := uploader.MaxBatchInsertSize("milestone")
+		for len(milestones) > 0 {
+			if len(milestones) < msBatchSize {
+				msBatchSize = len(milestones)
+			}
+
+			if err := uploader.CreateMilestones(milestones[:msBatchSize]...); err != nil {
+				return err
+			}
+			milestones = milestones[msBatchSize:]
 		}
 	}

@@ -103,8 +111,16 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 			return err
 		}
 
-		if err := uploader.CreateLabels(labels...); err != nil {
-			return err
+		lbBatchSize := uploader.MaxBatchInsertSize("label")
+		for len(labels) > 0 {
+			if len(labels) < lbBatchSize {
+				lbBatchSize = len(labels)
+			}
+
+			if err := uploader.CreateLabels(labels[:lbBatchSize]...); err != nil {
+				return err
+			}
+			labels = labels[lbBatchSize:]
 		}
 	}

@@ -115,15 +131,27 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 			return err
 		}
 
-		if err := uploader.CreateReleases(releases...); err != nil {
-			return err
+		relBatchSize := uploader.MaxBatchInsertSize("release")
+		for len(releases) > 0 {
+			if len(releases) < relBatchSize {
+				relBatchSize = len(releases)
+			}
+
+			if err := uploader.CreateReleases(releases[:relBatchSize]...); err != nil {
+				return err
+			}
+			releases = releases[relBatchSize:]
 		}
 	}
 
+	var commentBatchSize = uploader.MaxBatchInsertSize("comment")
+
 	if opts.Issues {
 		log.Trace("migrating issues and comments")
+		var issueBatchSize = uploader.MaxBatchInsertSize("issue")
+
 		for i := 1; ; i++ {
-			issues, isEnd, err := downloader.GetIssues(i, 100)
+			issues, isEnd, err := downloader.GetIssues(i, issueBatchSize)
 			if err != nil {
 				return err
 			}
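The milestone, label, and release hunks above share one slice-and-advance pattern: shrink the batch size to whatever is left, insert that prefix, then drop it from the slice. A minimal standalone sketch of the pattern, not part of the diff, with insertBatch standing in for the uploader's Create* calls:

package main

import "fmt"

// insertInBatches mirrors the shape of the three loops above (hypothetical helper).
func insertInBatches(items []string, batchSize int, insertBatch func([]string) error) error {
	for len(items) > 0 {
		if len(items) < batchSize {
			batchSize = len(items) // final, smaller batch
		}
		if err := insertBatch(items[:batchSize]); err != nil {
			return err
		}
		items = items[batchSize:] // advance past the inserted prefix
	}
	return nil
}

func main() {
	items := []string{"a", "b", "c", "d", "e"}
	_ = insertInBatches(items, 2, func(batch []string) error {
		fmt.Println(batch) // [a b], [c d], [e]
		return nil
	})
}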
@@ -141,7 +169,7 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 				continue
 			}
 
-			var allComments = make([]*base.Comment, 0, 100)
+			var allComments = make([]*base.Comment, 0, commentBatchSize)
 			for _, issue := range issues {
 				comments, err := downloader.GetComments(issue.Number)
 				if err != nil {
@@ -154,11 +182,12 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 				}
 				allComments = append(allComments, comments...)
 
-				if len(allComments) >= 100 {
-					if err := uploader.CreateComments(allComments...); err != nil {
+				if len(allComments) >= commentBatchSize {
+					if err := uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
 						return err
 					}
-					allComments = make([]*base.Comment, 0, 100)
+
+					allComments = allComments[commentBatchSize:]
 				}
 			}

@@ -176,8 +205,9 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 
 	if opts.PullRequests {
 		log.Trace("migrating pull requests and comments")
+		var prBatchSize = uploader.MaxBatchInsertSize("pullrequest")
 		for i := 1; ; i++ {
-			prs, err := downloader.GetPullRequests(i, 100)
+			prs, err := downloader.GetPullRequests(i, prBatchSize)
 			if err != nil {
 				return err
 			}
@@ -195,7 +225,7 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 				continue
 			}
 
-			var allComments = make([]*base.Comment, 0, 100)
+			var allComments = make([]*base.Comment, 0, commentBatchSize)
 			for _, pr := range prs {
 				comments, err := downloader.GetComments(pr.Number)
 				if err != nil {
@@ -209,11 +239,11 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 
 				allComments = append(allComments, comments...)
 
-				if len(allComments) >= 100 {
-					if err := uploader.CreateComments(allComments...); err != nil {
+				if len(allComments) >= commentBatchSize {
+					if err := uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
 						return err
 					}
-					allComments = make([]*base.Comment, 0, 100)
+					allComments = allComments[commentBatchSize:]
 				}
 			}
 			if len(allComments) > 0 {
@@ -222,7 +252,7 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 				}
 			}
 
-			if len(prs) < 100 {
+			if len(prs) < prBatchSize {
 				break
 			}
 		}
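In the issue and pull request loops, comments are buffered across items and flushed whenever the buffer reaches commentBatchSize, with any remainder flushed after the loop; paging then stops once a page comes back shorter than the page size. A condensed sketch of the buffering, not part of the diff, with flush standing in for uploader.CreateComments:

// Hypothetical illustration of the comment buffering above; not part of the diff.
func flushInBatches(perItemComments [][]string, batchSize int, flush func([]string) error) error {
	buffer := make([]string, 0, batchSize)
	for _, comments := range perItemComments {
		buffer = append(buffer, comments...)
		if len(buffer) >= batchSize {
			if err := flush(buffer[:batchSize]); err != nil {
				return err
			}
			buffer = buffer[batchSize:] // keep the overflow for the next round
		}
	}
	if len(buffer) > 0 {
		return flush(buffer) // final partial batch
	}
	return nil
}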