diff --git a/.travis.yml b/.travis.yml
index fc800010d..7e6546c2c 100755
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,7 +2,7 @@ dist: xenial
sudo: required
language: go
-go_import_path: github.com/opensds/opensds
+go_import_path: github.com/sodafoundation/controller
go:
- 1.12.x
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 31a5b045c..2c44679f3 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -63,7 +63,7 @@ Additionally, community organizers are available to help community members engag
If you feel you have been falsely or unfairly accused of violating this Code of Conduct, you should notify OpenSDS with a concise description of your grievance. Your grievance will be handled in accordance with our existing governing policies.
-[Policy](https://github.com/opensds/opensds/blob/master/LICENSE)
+[Policy](https://github.com/sodafoundation/controller/blob/master/LICENSE)
## 8. Scope
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 85b6c1dcd..23ec7b9d2 100755
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,8 +1,8 @@
# OpenSDS
-[![Go Report Card](https://goreportcard.com/badge/github.com/opensds/opensds?branch=master)](https://goreportcard.com/report/github.com/opensds/opensds)
-[![Build Status](https://travis-ci.org/opensds/opensds.svg?branch=master)](https://travis-ci.org/opensds/opensds)
-[![Coverage Status](https://coveralls.io/repos/github/opensds/opensds/badge.svg?branch=master)](https://coveralls.io/github/opensds/opensds?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/sodafoundation/controller?branch=master)](https://goreportcard.com/report/github.com/sodafoundation/controller)
+[![Build Status](https://travis-ci.org/sodafoundation/controller.svg?branch=master)](https://travis-ci.org/sodafoundation/controller)
+[![Coverage Status](https://coveralls.io/repos/github/sodafoundation/controller/badge.svg?branch=master)](https://coveralls.io/github/sodafoundation/controller?branch=master)
@@ -48,10 +48,10 @@ When reporting issues, refer to this format:
### Propose PRs
-- Raise your idea as an [issue](https://github.com/opensds/opensds/issues)
+- Raise your idea as an [issue](https://github.com/sodafoundation/controller/issues)
- If it is a new feature that needs lots of design details, a design proposal should also be submitted [here](https://github.com/opensds/design-specs/pulls).
- After reaching consensus in the issue discussions and design proposal reviews, complete the development on the forked repo and submit a PR.
- Here are the [PRs](https://github.com/opensds/opensds/pulls?q=is%3Apr+is%3Aclosed) that are already closed.
+ Here are the [PRs](https://github.com/sodafoundation/controller/pulls?q=is%3Apr+is%3Aclosed) that are already closed.
- If a PR is submitted by one of the core members, it has to be merged by a different core member.
- After PR is sufficiently discussed, it will get merged, abondoned or rejected depending on the outcome of the discussion.
diff --git a/INSTALL.md b/INSTALL.md
index 451ff2d2a..55d9cd30b 100755
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -1 +1 @@
-The installation documents have been moved into: [https://github.com/opensds/opensds/wiki](https://github.com/opensds/opensds/wiki)
+The installation documents have been moved into: [https://github.com/sodafoundation/controller/wiki](https://github.com/sodafoundation/controller/wiki)
diff --git a/Makefile b/Makefile
index 83fb118c5..87932ce55 100755
--- a/Makefile
+++ b/Makefile
@@ -26,7 +26,7 @@ ubuntu-dev-setup:
sudo apt-get update && sudo apt-get install -y \
build-essential gcc librados-dev librbd-dev
-build: prebuild osdsdock osdslet osdsapiserver osdsctl metricexporter
+build: prebuild osdslet
prebuild:
mkdir -p $(BUILD_DIR)
@@ -34,19 +34,19 @@ prebuild:
.PHONY: osdsdock osdslet osdsapiserver osdsctl docker test protoc goimports
osdsdock:
- go build -ldflags '-w -s' -o $(BUILD_DIR)/bin/osdsdock github.com/opensds/opensds/cmd/osdsdock
+ go build -ldflags '-w -s' -o $(BUILD_DIR)/bin/osdsdock github.com/sodafoundation/controller/cmd/osdsdock
osdslet:
- go build -ldflags '-w -s' -o $(BUILD_DIR)/bin/osdslet github.com/opensds/opensds/cmd/osdslet
+ go build -ldflags '-w -s' -o $(BUILD_DIR)/bin/osdslet github.com/sodafoundation/controller/cmd/osdslet
osdsapiserver:
- go build -ldflags '-w -s' -o $(BUILD_DIR)/bin/osdsapiserver github.com/opensds/opensds/cmd/osdsapiserver
+ go build -ldflags '-w -s' -o $(BUILD_DIR)/bin/osdsapiserver github.com/sodafoundation/controller/cmd/osdsapiserver
osdsctl:
- go build -ldflags '-w -s' -o $(BUILD_DIR)/bin/osdsctl github.com/opensds/opensds/osdsctl
+ go build -ldflags '-w -s' -o $(BUILD_DIR)/bin/osdsctl github.com/sodafoundation/controller/osdsctl
metricexporter:
- go build -ldflags '-w -s' -o $(BUILD_DIR)/bin/lvm_exporter github.com/opensds/opensds/contrib/exporters/lvm_exporter
+ go build -ldflags '-w -s' -o $(BUILD_DIR)/bin/lvm_exporter github.com/sodafoundation/controller/contrib/exporters/lvm_exporter
docker: build
cp $(BUILD_DIR)/bin/osdsdock ./cmd/osdsdock
diff --git a/README.md b/README.md
index cda1dd761..733022ed5 100755
--- a/README.md
+++ b/README.md
@@ -1,18 +1,18 @@
# OpenSDS
-[![Go Report Card](https://goreportcard.com/badge/github.com/opensds/opensds?branch=master)](https://goreportcard.com/report/github.com/opensds/opensds)
-[![Build Status](https://travis-ci.org/opensds/opensds.svg?branch=master)](https://travis-ci.org/opensds/opensds)
-[![codecov.io](https://codecov.io/github/opensds/opensds/coverage.svg?branch=master)](https://codecov.io/github/opensds/opensds?branch=master)
-[![Releases](https://img.shields.io/github/release/opensds/opensds/all.svg?style=flat-square)](https://github.com/opensds/opensds/releases)
-[![LICENSE](https://img.shields.io/github/license/opensds/opensds.svg?style=flat-square)](https://github.com/opensds/opensds/blob/master/LICENSE)
+[![Go Report Card](https://goreportcard.com/badge/github.com/sodafoundation/controller?branch=master)](https://goreportcard.com/report/github.com/sodafoundation/controller)
+[![Build Status](https://travis-ci.org/sodafoundation/controller.svg?branch=master)](https://travis-ci.org/sodafoundation/controller)
+[![codecov.io](https://codecov.io/github/sodafoundation/controller/coverage.svg?branch=master)](https://codecov.io/github/sodafoundation/controller?branch=master)
+[![Releases](https://img.shields.io/github/release/sodafoundation/controller/all.svg?style=flat-square)](https://github.com/sodafoundation/controller/releases)
+[![LICENSE](https://img.shields.io/github/license/sodafoundation/controller.svg?style=flat-square)](https://github.com/sodafoundation/controller/blob/master/LICENSE)
## Latest Release: v0.10.1 Daito
-[OpenAPI doc](http://petstore.swagger.io/?url=https://raw.githubusercontent.com/opensds/opensds/v0.10.1/openapi-spec/swagger.yaml)
+[OpenAPI doc](http://petstore.swagger.io/?url=https://raw.githubusercontent.com/sodafoundation/controller/v0.10.1/openapi-spec/swagger.yaml)
-[Release notes](https://github.com/opensds/opensds/releases/tag/v0.10.1)
+[Release notes](https://github.com/sodafoundation/controller/releases/tag/v0.10.1)
## Introduction
diff --git a/client/README.md b/client/README.md
index 1e252f2da..9fe4c1c5b 100755
--- a/client/README.md
+++ b/client/README.md
@@ -14,7 +14,7 @@ package main
import (
"fmt"
- "github.com/opensds/opensds/client"
+ "github.com/sodafoundation/controller/client"
)
func main() {
@@ -39,8 +39,8 @@ package main
import (
"fmt"
- "github.com/opensds/opensds/client"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/client"
+ "github.com/sodafoundation/controller/pkg/model"
)
func main() {
diff --git a/client/auth.go b/client/auth.go
index e8d0e006b..2435cc3eb 100644
--- a/client/auth.go
+++ b/client/auth.go
@@ -18,8 +18,8 @@ import (
"fmt"
"os"
- "github.com/opensds/opensds/pkg/utils/constants"
- "github.com/opensds/opensds/pkg/utils/pwd"
+ "github.com/sodafoundation/controller/pkg/utils/constants"
+ "github.com/sodafoundation/controller/pkg/utils/pwd"
)
const (
diff --git a/client/client.go b/client/client.go
index 36452fdb5..45db6ae69 100755
--- a/client/client.go
+++ b/client/client.go
@@ -22,7 +22,7 @@ import (
"os"
"strings"
- "github.com/opensds/opensds/pkg/utils/constants"
+ "github.com/sodafoundation/controller/pkg/utils/constants"
)
const (
diff --git a/client/dock.go b/client/dock.go
index 1878de6a6..5232634fd 100755
--- a/client/dock.go
+++ b/client/dock.go
@@ -17,8 +17,8 @@ package client
import (
"strings"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/urls"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils/urls"
)
func NewDockMgr(r Receiver, edp string, tenantId string) *DockMgr {
diff --git a/client/dock_test.go b/client/dock_test.go
index b2c473074..be5c789b7 100755
--- a/client/dock_test.go
+++ b/client/dock_test.go
@@ -18,7 +18,7 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
var fd = &DockMgr{
diff --git a/client/fake.go b/client/fake.go
index 18d3397be..7bb904369 100644
--- a/client/fake.go
+++ b/client/fake.go
@@ -21,8 +21,8 @@ import (
"strings"
"sync"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/testutils/collection"
+ "github.com/sodafoundation/controller/pkg/model"
+ . "github.com/sodafoundation/controller/testutils/collection"
)
var (
diff --git a/client/fileshare.go b/client/fileshare.go
index 60c788113..90f995554 100644
--- a/client/fileshare.go
+++ b/client/fileshare.go
@@ -17,8 +17,8 @@ package client
import (
"strings"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/urls"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils/urls"
)
// FileShareBuilder contains request body of handling a fileshare request.
diff --git a/client/fileshare_test.go b/client/fileshare_test.go
index 69e86628e..714bd92ad 100644
--- a/client/fileshare_test.go
+++ b/client/fileshare_test.go
@@ -18,8 +18,8 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/testutils/collection"
+ "github.com/sodafoundation/controller/pkg/model"
+ . "github.com/sodafoundation/controller/testutils/collection"
)
var fakeShareMgr = &FileShareMgr{
diff --git a/client/host.go b/client/host.go
index e5a3b4d32..7e6bf8a46 100644
--- a/client/host.go
+++ b/client/host.go
@@ -17,8 +17,8 @@ package client
import (
"strings"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/urls"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils/urls"
)
// HostBuilder contains request body of handling a host request.
diff --git a/client/host_test.go b/client/host_test.go
index c051a94a3..c42a341ce 100644
--- a/client/host_test.go
+++ b/client/host_test.go
@@ -18,7 +18,7 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
var fakeHostMgr = &HostMgr{
diff --git a/client/pool.go b/client/pool.go
index 2325cc01f..4c7149b2d 100755
--- a/client/pool.go
+++ b/client/pool.go
@@ -17,8 +17,8 @@ package client
import (
"strings"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/urls"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils/urls"
)
// NewPoolMgr
diff --git a/client/pool_test.go b/client/pool_test.go
index 3a2ff1f06..2ae2052c8 100755
--- a/client/pool_test.go
+++ b/client/pool_test.go
@@ -17,7 +17,7 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
var fp = &PoolMgr{
diff --git a/client/profile.go b/client/profile.go
index 30071e432..b5b0c93a1 100755
--- a/client/profile.go
+++ b/client/profile.go
@@ -17,8 +17,8 @@ package client
import (
"strings"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/urls"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils/urls"
)
// ProfileBuilder contains request body of handling a profile request.
diff --git a/client/profile_test.go b/client/profile_test.go
index 1d0feef87..709a2858c 100755
--- a/client/profile_test.go
+++ b/client/profile_test.go
@@ -18,7 +18,7 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
var fpr = &ProfileMgr{
diff --git a/client/receiver.go b/client/receiver.go
index dc9bcb3db..3069db66e 100755
--- a/client/receiver.go
+++ b/client/receiver.go
@@ -30,9 +30,9 @@ import (
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack"
"github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/opensds/opensds/pkg/utils/constants"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils"
+ "github.com/sodafoundation/controller/pkg/utils/constants"
)
func NewHttpError(code int, msg string) error {
diff --git a/client/replication.go b/client/replication.go
index a7bfc699e..a07cfbef4 100644
--- a/client/replication.go
+++ b/client/replication.go
@@ -17,8 +17,8 @@ package client
import (
"strings"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/urls"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils/urls"
)
type ReplicationBuilder *model.ReplicationSpec
diff --git a/client/replication_test.go b/client/replication_test.go
index 9267fc6f0..5fc6b80ba 100644
--- a/client/replication_test.go
+++ b/client/replication_test.go
@@ -18,7 +18,7 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
var fr = &ReplicationMgr{
diff --git a/client/version.go b/client/version.go
index 9665ea82f..36f2d030d 100644
--- a/client/version.go
+++ b/client/version.go
@@ -17,7 +17,7 @@ package client
import (
"strings"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
// VersionBuilder contains request body of handling a version request.
diff --git a/client/version_test.go b/client/version_test.go
index c036fc997..8938a202b 100644
--- a/client/version_test.go
+++ b/client/version_test.go
@@ -18,7 +18,7 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
var fakeVersion = &VersionMgr{
diff --git a/client/volume.go b/client/volume.go
index 218e4e1a0..442026680 100755
--- a/client/volume.go
+++ b/client/volume.go
@@ -17,8 +17,8 @@ package client
import (
"strings"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/urls"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils/urls"
)
// VolumeBuilder contains request body of handling a volume request.
diff --git a/client/volume_test.go b/client/volume_test.go
index 09cdabb9d..c21e8348b 100755
--- a/client/volume_test.go
+++ b/client/volume_test.go
@@ -18,7 +18,7 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
var fv = &VolumeMgr{
diff --git a/cmd/osdsapiserver/Autobuildfile b/cmd/osdsapiserver/Autobuildfile
deleted file mode 100644
index 07909ff76..000000000
--- a/cmd/osdsapiserver/Autobuildfile
+++ /dev/null
@@ -1,14 +0,0 @@
-# Docker build usage:
-# docker build . -t opensdsio/opensds-apiserver:latest
-# Docker run usage:
-# docker run -d --net=host -v /etc/opensds:/etc/opensds opensdsio/opensds-apiserver:latest
-
-FROM ubuntu:16.04
-MAINTAINER Leon Wang
-
-
-COPY cmd/osdsapiserver/osdsapiserver /usr/bin
-
-
-# Define default command.
-CMD ["/usr/bin/osdsapiserver"]
diff --git a/cmd/osdsapiserver/Dockerfile b/cmd/osdsapiserver/Dockerfile
deleted file mode 100644
index 289b993c3..000000000
--- a/cmd/osdsapiserver/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-# Docker build usage:
-# docker build . -t opensdsio/opensds-apiserver:latest
-# Docker run usage:
-# docker run -d --net=host -v /etc/opensds:/etc/opensds opensdsio/opensds-apiserver:latest
-
-FROM ubuntu:16.04
-MAINTAINER Leon Wang
-
-COPY osdsapiserver /usr/bin
-
-# Define default command.
-CMD ["/usr/bin/osdsapiserver"]
diff --git a/cmd/osdsapiserver/hooks/pre_build b/cmd/osdsapiserver/hooks/pre_build
deleted file mode 100644
index ab374bbd3..000000000
--- a/cmd/osdsapiserver/hooks/pre_build
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-cd ../..
-BASE_DIR=`pwd`
-apt-get update && apt-get install -y make wget sudo
-wget https://storage.googleapis.com/golang/go1.12.1.linux-amd64.tar.gz
-tar -C /usr/local -xzf go1.12.1.linux-amd64.tar.gz
-echo 'export PATH=$PATH:/usr/local/go/bin' >> /etc/profile
-echo 'export GOPATH=$HOME/gopath' >> /etc/profile
-source /etc/profile
-cd $HOME
-mkdir -p gopath/src/github.com/opensds/opensds
-cd gopath/src/github.com/opensds/opensds
-cp -r ${BASE_DIR}/* .
-make ubuntu-dev-setup && make docker
-cp cmd/osdsapiserver/osdsapiserver ${BASE_DIR}/cmd/osdsapiserver
diff --git a/cmd/osdsapiserver/osdsapiserver.go b/cmd/osdsapiserver/osdsapiserver.go
deleted file mode 100644
index dee792ce6..000000000
--- a/cmd/osdsapiserver/osdsapiserver.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS REST service.
-
-*/
-
-package main
-
-import (
- "flag"
-
- "github.com/opensds/opensds/pkg/api"
- "github.com/opensds/opensds/pkg/db"
- . "github.com/opensds/opensds/pkg/utils/config"
- "github.com/opensds/opensds/pkg/utils/daemon"
- "github.com/opensds/opensds/pkg/utils/logs"
-)
-
-func init() {
- // Load global configuration from specified config file.
- CONF.Load()
-
- // Parse some configuration fields from command line. and it will override the value which is got from config file.
- flag.StringVar(&CONF.OsdsApiServer.ApiEndpoint, "api-endpoint", CONF.OsdsApiServer.ApiEndpoint, "Listen endpoint of api-server service")
- flag.DurationVar(&CONF.OsdsApiServer.LogFlushFrequency, "log-flush-frequency", CONF.OsdsApiServer.LogFlushFrequency, "Maximum number of seconds between log flushes")
- flag.BoolVar(&CONF.OsdsApiServer.Daemon, "daemon", CONF.OsdsApiServer.Daemon, "Run app as a daemon with -daemon=true")
- // prometheus related
- flag.StringVar(&CONF.OsdsApiServer.PrometheusConfHome, "prometheus-conf-home", CONF.OsdsApiServer.PrometheusConfHome, "Prometheus conf. path")
- flag.StringVar(&CONF.OsdsApiServer.PrometheusUrl, "prometheus-url", CONF.OsdsApiServer.PrometheusUrl, "Prometheus URL")
- flag.StringVar(&CONF.OsdsApiServer.PrometheusConfFile, "prometheus-conf-file", CONF.OsdsApiServer.PrometheusConfFile, "Prometheus conf. file")
- // alert manager related
- flag.StringVar(&CONF.OsdsApiServer.AlertmgrConfHome, "alertmgr-conf-home", CONF.OsdsApiServer.AlertmgrConfHome, "Alert manager conf. home")
- flag.StringVar(&CONF.OsdsApiServer.AlertMgrUrl, "alertmgr-url", CONF.OsdsApiServer.AlertMgrUrl, "Alert manager listen endpoint")
- flag.StringVar(&CONF.OsdsApiServer.AlertmgrConfFile, "alertmgr-conf-file", CONF.OsdsApiServer.AlertmgrConfFile, "Alert manager conf. file")
- // grafana related
- flag.StringVar(&CONF.OsdsApiServer.GrafanaConfHome, "grafana-conf-home", CONF.OsdsApiServer.GrafanaConfHome, "Grafana conf. home")
- flag.StringVar(&CONF.OsdsApiServer.GrafanaRestartCmd, "grafana-restart-cmd", CONF.OsdsApiServer.GrafanaRestartCmd, "Grafana restart command")
- flag.StringVar(&CONF.OsdsApiServer.GrafanaConfFile, "grafana-conf-file", CONF.OsdsApiServer.GrafanaConfFile, "Grafana conf file")
- flag.StringVar(&CONF.OsdsApiServer.GrafanaUrl, "grafana-url", CONF.OsdsApiServer.GrafanaUrl, "Grafana listen endpoint")
- // prometheus and alert manager configuration reload url
- flag.StringVar(&CONF.OsdsApiServer.ConfReloadUrl, "conf-reload-url", CONF.OsdsApiServer.ConfReloadUrl, "Prometheus and Alert manager conf. reload URL")
- flag.Parse()
-
- daemon.CheckAndRunDaemon(CONF.OsdsApiServer.Daemon)
-}
-
-func main() {
- // Open OpenSDS orchestrator service log file.
- logs.InitLogs(CONF.OsdsApiServer.LogFlushFrequency)
- defer logs.FlushLogs()
-
- // Set up database session.
- db.Init(&CONF.Database)
-
- // Start OpenSDS northbound REST service.
- api.Run(CONF.OsdsApiServer)
-}
diff --git a/cmd/osdsdock/Autobuildfile b/cmd/osdsdock/Autobuildfile
deleted file mode 100644
index 6e374376a..000000000
--- a/cmd/osdsdock/Autobuildfile
+++ /dev/null
@@ -1,20 +0,0 @@
-# Docker build usage:
-# docker build . -t opensdsio/opensds-dock:latest
-# Docker run usage:
-# docker run -d --privileged=true --net=host -v /etc/opensds:/etc/opensds opensdsio/opensds-dock:latest
-FROM ubuntu:16.04
-
-MAINTAINER Leon Wang
-
-COPY cmd/osdsdock/osdsdock /usr/bin
-
-# Install some packages before running command.
-RUN apt-get update && apt-get install -y \
- librados-dev librbd-dev ceph-common lvm2 udev tgt \
- && rm -rf /var/lib/apt
-RUN sed -i -e 's/udev_sync = 1/udev_sync = 0/g' /etc/lvm/lvm.conf \
- && sed -i -e 's/udev_rules = 1/udev_rules = 0/g' /etc/lvm/lvm.conf \
- && sed -i -e 's/use_lvmetad = 0/use_lvmetad =1/g' /etc/lvm/lvm.conf
-
-# Define default command.
-CMD ["/usr/bin/osdsdock"]
diff --git a/cmd/osdsdock/Dockerfile b/cmd/osdsdock/Dockerfile
deleted file mode 100755
index 2faa033e8..000000000
--- a/cmd/osdsdock/Dockerfile
+++ /dev/null
@@ -1,20 +0,0 @@
-# Docker build usage:
-# docker build . -t opensdsio/opensds-dock:latest
-# Docker run usage:
-# docker run -d --privileged=true --net=host -v /etc/opensds:/etc/opensds opensdsio/opensds-dock:latest
-
-FROM ubuntu:16.04
-MAINTAINER Leon Wang
-
-COPY osdsdock /usr/bin
-
-# Install some packages before running command.
-RUN apt-get update && apt-get install -y \
- librados-dev librbd-dev ceph-common lvm2 udev tgt \
- && rm -rf /var/lib/apt
-RUN sed -i -e 's/udev_sync = 1/udev_sync = 0/g' /etc/lvm/lvm.conf \
- && sed -i -e 's/udev_rules = 1/udev_rules = 0/g' /etc/lvm/lvm.conf \
- && sed -i -e 's/use_lvmetad = 0/use_lvmetad =1/g' /etc/lvm/lvm.conf
-
-# Define default command.
-CMD ["/usr/bin/osdsdock"]
diff --git a/cmd/osdsdock/hooks/pre_build b/cmd/osdsdock/hooks/pre_build
deleted file mode 100644
index ac846f46d..000000000
--- a/cmd/osdsdock/hooks/pre_build
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-cd ../..
-
-BASE_DIR=`pwd`
-apt-get update && apt-get install -y make wget sudo
-wget https://storage.googleapis.com/golang/go1.12.1.linux-amd64.tar.gz
-tar -C /usr/local -xzf go1.12.1.linux-amd64.tar.gz
-echo 'export PATH=$PATH:/usr/local/go/bin' >> /etc/profile
-echo 'export GOPATH=$HOME/gopath' >> /etc/profile
-source /etc/profile
-cd $HOME
-mkdir -p gopath/src/github.com/opensds/opensds
-cd gopath/src/github.com/opensds/opensds
-cp -r ${BASE_DIR}/* .
-make ubuntu-dev-setup && make docker
-cp cmd/osdsdock/osdsdock ${BASE_DIR}/cmd/osdsdock
-
-
diff --git a/cmd/osdsdock/osdsdock.go b/cmd/osdsdock/osdsdock.go
deleted file mode 100755
index e13d8ea33..000000000
--- a/cmd/osdsdock/osdsdock.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS REST service.
-
-*/
-
-package main
-
-import (
- "flag"
-
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/dock"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/pkg/utils/config"
- "github.com/opensds/opensds/pkg/utils/constants"
- "github.com/opensds/opensds/pkg/utils/daemon"
- "github.com/opensds/opensds/pkg/utils/logs"
-)
-
-func init() {
- // Load global configuration from specified config file.
- CONF.Load()
-
- // Parse some configuration fields from command line. and it will override the value which is got from config file.
- flag.StringVar(&CONF.OsdsDock.ApiEndpoint, "api-endpoint", CONF.OsdsDock.ApiEndpoint, "Listen endpoint of dock service")
- flag.StringVar(&CONF.OsdsDock.DockType, "dock-type", CONF.OsdsDock.DockType, "Type of dock service")
- flag.BoolVar(&CONF.OsdsDock.Daemon, "daemon", CONF.OsdsDock.Daemon, "Run app as a daemon with -daemon=true")
- flag.DurationVar(&CONF.OsdsDock.LogFlushFrequency, "log-flush-frequency", CONF.OsdsDock.LogFlushFrequency, "Maximum number of seconds between log flushes")
- flag.Parse()
-
- daemon.CheckAndRunDaemon(CONF.OsdsDock.Daemon)
-}
-
-func main() {
- // Open OpenSDS dock service log file.
- logs.InitLogs(CONF.OsdsDock.LogFlushFrequency)
- defer logs.FlushLogs()
-
- // Set up database session.
- db.Init(&CONF.Database)
-
- // FixMe: osdsdock attacher service needs to specify the endpoint via configuration file,
- // so add this temporarily.
- listenEndpoint := constants.OpensdsDockBindEndpoint
- if CONF.OsdsDock.DockType == model.DockTypeAttacher {
- listenEndpoint = CONF.OsdsDock.ApiEndpoint
- }
- // Construct dock module grpc server struct and run dock server process.
- ds := dock.NewDockServer(CONF.OsdsDock.DockType, listenEndpoint)
- if err := ds.Run(); err != nil {
- panic(err)
- }
-}
diff --git a/cmd/osdslet/hooks/pre_build b/cmd/osdslet/hooks/pre_build
index 263241c36..488221f43 100644
--- a/cmd/osdslet/hooks/pre_build
+++ b/cmd/osdslet/hooks/pre_build
@@ -23,8 +23,8 @@ echo 'export PATH=$PATH:/usr/local/go/bin' >> /etc/profile
echo 'export GOPATH=$HOME/gopath' >> /etc/profile
source /etc/profile
cd $HOME
-mkdir -p gopath/src/github.com/opensds/opensds
-cd gopath/src/github.com/opensds/opensds
+mkdir -p gopath/src/github.com/sodafoundation/controller
+cd gopath/src/github.com/sodafoundation/controller
cp -r ${BASE_DIR}/* .
make ubuntu-dev-setup && make docker
cp cmd/osdslet/osdslet ${BASE_DIR}/cmd/osdslet
diff --git a/cmd/osdslet/osdslet.go b/cmd/osdslet/osdslet.go
index d4ce94186..7cebcbb1e 100755
--- a/cmd/osdslet/osdslet.go
+++ b/cmd/osdslet/osdslet.go
@@ -22,12 +22,12 @@ package main
import (
"flag"
- c "github.com/opensds/opensds/pkg/controller"
- "github.com/opensds/opensds/pkg/db"
- . "github.com/opensds/opensds/pkg/utils/config"
- "github.com/opensds/opensds/pkg/utils/constants"
- "github.com/opensds/opensds/pkg/utils/daemon"
- "github.com/opensds/opensds/pkg/utils/logs"
+ c "github.com/sodafoundation/controller/pkg/controller"
+ "github.com/sodafoundation/controller/pkg/db"
+ . "github.com/sodafoundation/controller/pkg/utils/config"
+ "github.com/sodafoundation/controller/pkg/utils/constants"
+ "github.com/sodafoundation/controller/pkg/utils/daemon"
+ "github.com/sodafoundation/controller/pkg/utils/logs"
)
func init() {
diff --git a/contrib/backup/driver.go b/contrib/backup/driver.go
deleted file mode 100644
index 6f12edcd3..000000000
--- a/contrib/backup/driver.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package backup
-
-import (
- "fmt"
- "os"
-)
-
-type BackupSpec struct {
- Id string
- Name string
- Metadata map[string]string
-}
-
-type BackupDriver interface {
- SetUp() error
- Backup(backup *BackupSpec, volumeFile *os.File) error
- Restore(backup *BackupSpec, backupId string, volFile *os.File) error
- Delete(backup *BackupSpec) error
- CleanUp() error
-}
-
-type ctorFun func() (BackupDriver, error)
-
-var ctorFunMap = map[string]ctorFun{}
-
-func NewBackup(backupDriverName string) (BackupDriver, error) {
- fun, exist := ctorFunMap[backupDriverName]
- if !exist {
- return nil, fmt.Errorf("specified backup driver does not exist")
- }
-
- drv, err := fun()
- if err != nil {
- return nil, err
- }
- return drv, nil
-}
-
-func RegisterBackupCtor(bType string, fun ctorFun) error {
- if _, exist := ctorFunMap[bType]; exist {
- return fmt.Errorf("backup driver construct function %s already exist", bType)
- }
- ctorFunMap[bType] = fun
- return nil
-}
-
-func UnregisterBackupCtor(cType string) {
- if _, exist := ctorFunMap[cType]; !exist {
- return
- }
-
- delete(ctorFunMap, cType)
- return
-}
diff --git a/contrib/backup/multicloud/client.go b/contrib/backup/multicloud/client.go
deleted file mode 100644
index d64067138..000000000
--- a/contrib/backup/multicloud/client.go
+++ /dev/null
@@ -1,351 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package multicloud
-
-import (
- "encoding/xml"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "path"
- "strconv"
- "time"
-
- "github.com/astaxie/beego/httplib"
- log "github.com/golang/glog"
- "github.com/gophercloud/gophercloud"
- "github.com/gophercloud/gophercloud/openstack"
- "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
- "github.com/opensds/opensds/pkg/utils/pwd"
-)
-
-const (
- DefaultTenantId = "adminTenantId"
- DefaultTimeout = 60 // in Seconds
- DefaultUploadTimeout = 30 // in Seconds
- ApiVersion = "v1"
-)
-
-type Client struct {
- endpoint string
- tenantId string
- version string
- baseURL string
- auth *AuthOptions
- token *tokens.Token
- timeout time.Duration
- uploadTimeout time.Duration
-}
-
-func NewClient(endpooint string, opt *AuthOptions, uploadTimeout int64) (*Client, error) {
- u, err := url.Parse(endpooint)
- if err != nil {
- return nil, err
- }
- u.Path = path.Join(u.Path, ApiVersion)
- baseURL := u.String() + "/"
-
- client := &Client{
- endpoint: endpooint,
- tenantId: DefaultTenantId,
- version: ApiVersion,
- baseURL: baseURL,
- timeout: time.Duration(DefaultTimeout) * time.Minute,
- uploadTimeout: time.Duration(uploadTimeout) * time.Minute,
- auth: opt,
- }
-
- if opt.Strategy == "keystone" {
- if err := client.UpdateToken(); err != nil {
- return nil, err
- }
- }
- return client, nil
-}
-
-type ReqSettingCB func(req *httplib.BeegoHTTPRequest) error
-
-func (c *Client) getToken(opt *AuthOptions) (*tokens.CreateResult, error) {
- var pwdCiphertext = opt.Password
-
- if opt.EnableEncrypted {
- // Decrypte the password
- pwdTool := pwd.NewPwdEncrypter(opt.PwdEncrypter)
- password, err := pwdTool.Decrypter(pwdCiphertext)
- if err != nil {
- return nil, err
- }
- pwdCiphertext = password
- }
-
- auth := gophercloud.AuthOptions{
- IdentityEndpoint: opt.AuthUrl,
- DomainName: opt.DomainName,
- Username: opt.UserName,
- Password: pwdCiphertext,
- TenantName: opt.TenantName,
- }
-
- provider, err := openstack.AuthenticatedClient(auth)
- if err != nil {
- log.Error("When get auth client:", err)
- return nil, err
- }
-
- // Only support keystone v3
- identity, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{})
- if err != nil {
- log.Error("When get identity session:", err)
- return nil, err
- }
- r := tokens.Create(identity, &auth)
- return &r, nil
-}
-
-func (c *Client) UpdateToken() error {
- t, err := c.getToken(c.auth)
- if err != nil {
- log.Errorf("Get token failed, %v", err)
- return err
- }
- project, err := t.ExtractProject()
- if err != nil {
- log.Errorf("extract project failed, %v", err)
- return err
- }
- c.tenantId = project.ID
- token, err := t.ExtractToken()
- if err != nil {
- log.Errorf("extract token failed, %v", err)
- return err
- }
- c.token = token
- log.V(5).Infof("TokenId:%s, ExpiresAt:%v", token.ID, token.ExpiresAt)
- return nil
-}
-
-func (c *Client) doRequest(method, u string, in interface{}, cb ReqSettingCB) ([]byte, http.Header, error) {
- req := httplib.NewBeegoRequest(u, method)
- req.Header("Content-Type", "application/xml")
- if c.auth.Strategy == "keystone" {
- beforeExpires := c.token.ExpiresAt.Add(time.Minute)
- if time.Now().After(beforeExpires) {
- log.Warning("token is about to expire, update it")
- if err := c.UpdateToken(); err != nil {
- return nil, nil, err
- }
- }
- req.Header("X-Auth-Token", c.token.ID)
- }
-
- req.SetTimeout(c.timeout, c.timeout)
- if cb != nil {
- if err := cb(req); err != nil {
- return nil, nil, err
- }
- }
-
- if in != nil {
- var body interface{}
- switch in.(type) {
- case string, []byte:
- body = in
- default:
- body, _ = xml.Marshal(in)
- }
- req.Body(body)
- }
-
- resp, err := req.Response()
- if err != nil {
- log.Errorf("Do http request failed, method: %s\n url: %s\n error: %v", method, u, err)
- return nil, nil, err
- }
-
- log.V(5).Infof("%s: %s OK\n", method, u)
- rbody, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- log.Errorf("Get byte[] from response failed, method: %s\n url: %s\n error: %v", method, u, err)
- return nil, nil, err
- }
- return rbody, resp.Header, nil
-}
-
-func (c *Client) request(method, p string, in, out interface{}, cb ReqSettingCB) error {
- u, err := url.Parse(p)
- if err != nil {
- return err
- }
- base, err := url.Parse(c.baseURL)
- if err != nil {
- return err
- }
-
- fullUrl := base.ResolveReference(u)
- b, _, err := c.doRequest(method, fullUrl.String(), in, cb)
- if err != nil {
- return err
- }
-
- if out != nil {
- log.V(5).Infof("Response:\n%s\n", string(b))
- err := xml.Unmarshal(b, out)
- if err != nil {
- log.Errorf("unmarshal error, reason:%v", err)
- return err
- }
- }
- return nil
-}
-
-type Object struct {
- ObjectKey string `xml:"ObjectKey"`
- BucketName string `xml:"BucketName"`
- Size uint64 `xml:"Size"`
-}
-
-type ListObjectResponse struct {
- ListObjects []Object `xml:"ListObjects"`
-}
-
-type InitiateMultipartUploadResult struct {
- Xmlns string `xml:"xmlns,attr"`
- Bucket string `xml:"Bucket"`
- Key string `xml:"Key"`
- UploadId string `xml:"UploadId"`
-}
-
-type UploadPartResult struct {
- Xmlns string `xml:"xmlns,attr"`
- PartNumber int64 `xml:"PartNumber"`
- ETag string `xml:"ETag"`
-}
-
-type Part struct {
- PartNumber int64 `xml:"PartNumber"`
- ETag string `xml:"ETag"`
-}
-
-type CompleteMultipartUpload struct {
- Xmlns string `xml:"xmlns,attr"`
- Part []Part `xml:"Part"`
-}
-
-type CompleteMultipartUploadResult struct {
- Xmlns string `xml:"xmlns,attr"`
- Location string `xml:"Location"`
- Bucket string `xml:"Bucket"`
- Key string `xml:"Key"`
- ETag string `xml:"ETag"`
-}
-
-func (c *Client) UploadObject(bucketName, objectKey string, data []byte) error {
- p := path.Join("s3", bucketName, objectKey)
- err := c.request("PUT", p, data, nil, nil)
- return err
-}
-
-func (c *Client) ListObject(bucketName string) (*ListObjectResponse, error) {
- p := path.Join("s3", bucketName)
- object := &ListObjectResponse{}
- if err := c.request("GET", p, nil, object, nil); err != nil {
- return nil, err
- }
- return object, nil
-}
-
-func (c *Client) RemoveObject(bucketName, objectKey string) error {
- p := path.Join("s3", bucketName, objectKey)
- err := c.request("DELETE", p, nil, nil, nil)
- return err
-}
-
-func (c *Client) InitMultiPartUpload(bucketName, objectKey string) (*InitiateMultipartUploadResult, error) {
- p := path.Join("s3", bucketName, objectKey)
- p += "?uploads"
- out := &InitiateMultipartUploadResult{}
- if err := c.request("PUT", p, nil, out, nil); err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *Client) UploadPart(bucketName, objectKey string, partNum int64, uploadId string, data []byte, size int64) (*UploadPartResult, error) {
- log.Infof("upload part buf size:%d", len(data))
- p := path.Join("s3", bucketName, objectKey)
- p += fmt.Sprintf("?partNumber=%d&uploadId=%s", partNum, uploadId)
- out := &UploadPartResult{}
- reqSettingCB := func(req *httplib.BeegoHTTPRequest) error {
- req.Header("Content-Length", strconv.FormatInt(size, 10))
- req.SetTimeout(c.uploadTimeout, c.uploadTimeout)
- return nil
- }
- if err := c.request("PUT", p, data, out, reqSettingCB); err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *Client) CompleteMultipartUpload(
- bucketName string,
- objectKey string,
- uploadId string,
- input *CompleteMultipartUpload) (*CompleteMultipartUploadResult, error) {
-
- p := path.Join("s3", bucketName, objectKey)
- p += fmt.Sprintf("?uploadId=%s", uploadId)
- out := &CompleteMultipartUploadResult{}
- if err := c.request("PUT", p, input, nil, nil); err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *Client) AbortMultipartUpload(bucketName, objectKey string) error {
- // TODO: multi-cloud has not implemented it yet. so just comment it.
- //p := path.Join("s3", "AbortMultipartUpload", bucketName, objectKey)
- //if err := c.request("DELETE", p, nil, nil); err != nil {
- // return err
- //}
- return nil
-}
-
-func (c *Client) DownloadPart(bucketName, objectKey string, offset, size int64) ([]byte, error) {
- p := path.Join("s3", bucketName, objectKey)
-
- reqSettingCB := func(req *httplib.BeegoHTTPRequest) error {
- rangeStr := fmt.Sprintf("bytes:%d-%d", offset, offset+size-1)
- req.Header("Range", rangeStr)
- req.SetTimeout(c.uploadTimeout, c.uploadTimeout)
- return nil
- }
-
- u, err := url.Parse(p)
- if err != nil {
- return nil, err
- }
- base, err := url.Parse(c.baseURL)
- if err != nil {
- return nil, err
- }
-
- fullUrl := base.ResolveReference(u)
- body, _, err := c.doRequest("GET", fullUrl.String(), nil, reqSettingCB)
- if err != nil {
- return nil, err
- }
- return body, nil
-}
diff --git a/contrib/backup/multicloud/driver.go b/contrib/backup/multicloud/driver.go
deleted file mode 100644
index 834fb3a73..000000000
--- a/contrib/backup/multicloud/driver.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package multicloud
-
-import (
- "errors"
- "io"
- "io/ioutil"
- "os"
-
- "github.com/golang/glog"
- "github.com/opensds/opensds/contrib/backup"
- "github.com/opensds/opensds/pkg/utils"
- "gopkg.in/yaml.v2"
-)
-
-const (
- ConfFile = "/etc/opensds/driver/multi-cloud.yaml"
- ChunkSize = 1024 * 1024 * 50
-)
-
-func init() {
- backup.RegisterBackupCtor("multi-cloud", NewMultiCloud)
-}
-
-func NewMultiCloud() (backup.BackupDriver, error) {
- return &MultiCloud{}, nil
-}
-
-type AuthOptions struct {
- Strategy string `yaml:"Strategy"`
- AuthUrl string `yaml:"AuthUrl,omitempty"`
- DomainName string `yaml:"DomainName,omitempty"`
- UserName string `yaml:"UserName,omitempty"`
- Password string `yaml:"Password,omitempty"`
- PwdEncrypter string `yaml:"PwdEncrypter,omitempty"`
- EnableEncrypted bool `yaml:"EnableEncrypted,omitempty"`
- TenantName string `yaml:"TenantName,omitempty"`
-}
-
-type MultiCloudConf struct {
- Endpoint string `yaml:"Endpoint,omitempty"`
- UploadTimeout int64 `yaml:"UploadTimeout,omitempty"`
- AuthOptions `yaml:"AuthOptions,omitempty"`
-}
-
-type MultiCloud struct {
- client *Client
- conf *MultiCloudConf
-}
-
-func (m *MultiCloud) loadConf(p string) (*MultiCloudConf, error) {
- conf := &MultiCloudConf{
- Endpoint: "http://127.0.0.1:8088",
- UploadTimeout: DefaultUploadTimeout,
- }
- confYaml, err := ioutil.ReadFile(p)
- if err != nil {
- glog.Errorf("Read config yaml file (%s) failed, reason:(%v)", p, err)
- return nil, err
- }
- if err = yaml.Unmarshal(confYaml, conf); err != nil {
- glog.Errorf("Parse error: %v", err)
- return nil, err
- }
- return conf, nil
-}
-
-func (m *MultiCloud) SetUp() error {
- // Set the default value
- var err error
- if m.conf, err = m.loadConf(ConfFile); err != nil {
- return err
- }
-
- if m.client, err = NewClient(m.conf.Endpoint, &m.conf.AuthOptions, m.conf.UploadTimeout); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *MultiCloud) CleanUp() error {
- // Do nothing
- return nil
-}
-
-func (m *MultiCloud) Backup(backup *backup.BackupSpec, volFile *os.File) error {
- buf := make([]byte, ChunkSize)
- input := &CompleteMultipartUpload{}
-
- bucket, ok := backup.Metadata["bucket"]
- if !ok {
- return errors.New("can't find bucket in metadata")
- }
- key := backup.Id
- initResp, err := m.client.InitMultiPartUpload(bucket, key)
- if err != nil {
- glog.Errorf("Init part failed, err:%v", err)
- return err
- }
-
- defer m.client.AbortMultipartUpload(bucket, key)
- var parts []Part
- for partNum := int64(1); ; partNum++ {
- size, err := volFile.Read(buf)
- glog.Infof("read buf size len:%d", size)
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
- if size == 0 {
- break
- }
- var uploadResp *UploadPartResult
- err = utils.Retry(3, "upload part", false, func(retryIdx int, lastErr error) error {
- var inErr error
- uploadResp, inErr = m.client.UploadPart(bucket, key, partNum, initResp.UploadId, buf[:size], int64(size))
- return inErr
- })
- if err != nil {
- glog.Errorf("upload part failed, err:%v", err)
- return err
- }
- parts = append(parts, Part{PartNumber: partNum, ETag: uploadResp.ETag})
- }
- input.Part = parts
- _, err = m.client.CompleteMultipartUpload(bucket, key, initResp.UploadId, input)
- if err != nil {
- glog.Errorf("complete part failed, err:%v", err)
- return err
- }
- m.client.AbortMultipartUpload(bucket, key)
- glog.Infof("backup success ...")
- return nil
-}
-
-func (m *MultiCloud) Restore(backup *backup.BackupSpec, backupId string, volFile *os.File) error {
- bucket, ok := backup.Metadata["bucket"]
- if !ok {
- return errors.New("can't find bucket in metadata")
- }
- var downloadSize = ChunkSize
- // if the size of data of smaller than require download size
- // downloading is completed.
- for offset := int64(0); downloadSize == ChunkSize; offset += ChunkSize {
- var data []byte
- err := utils.Retry(3, "download part", false, func(retryIdx int, lastErr error) error {
- var inErr error
- data, inErr = m.client.DownloadPart(bucket, backupId, offset, ChunkSize)
- return inErr
- })
- if err != nil {
- glog.Errorf("download part failed: %v", err)
- return err
- }
- downloadSize = len(data)
- glog.V(5).Infof("download size: %d\n", downloadSize)
- volFile.Seek(offset, 0)
- size, err := volFile.Write(data)
- if err != nil {
- glog.Errorf("write part failed: %v", err)
- return err
- }
- if size != downloadSize {
- return errors.New("size not equal to download size")
- }
- glog.V(5).Infof("write buf size len:%d", size)
- }
- glog.Infof("restore success ...")
- return nil
-}
-
-func (m *MultiCloud) Delete(backup *backup.BackupSpec) error {
- bucket := backup.Metadata["bucket"]
- key := backup.Id
- return m.client.RemoveObject(bucket, key)
-}
diff --git a/contrib/backup/multicloud/driver_test.go b/contrib/backup/multicloud/driver_test.go
deleted file mode 100644
index eaab3ab5a..000000000
--- a/contrib/backup/multicloud/driver_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package multicloud
-
-import (
- "fmt"
- "reflect"
- "testing"
-)
-
-const (
- testConfFile = "./testdata/multi-cloud.yaml"
-)
-
-func TestLoadConf(t *testing.T) {
- m := &MultiCloud{}
- conf, err := m.loadConf(testConfFile)
- if err != nil {
- t.Errorf("load conf file failed")
- }
- expect := &MultiCloudConf{
- Endpoint: "http://127.0.0.1:8088",
- UploadTimeout: DefaultUploadTimeout,
- AuthOptions: AuthOptions{
- Strategy: "keystone",
- AuthUrl: "http://127.0.0.1/identity",
- DomainName: "Default",
- UserName: "admin",
- Password: "opensds@123",
- TenantName: "admin",
- PwdEncrypter: "aes",
- EnableEncrypted: false,
- },
- }
- fmt.Printf("%+v", conf)
- if !reflect.DeepEqual(expect, conf) {
- t.Errorf("load conf file error")
- }
-}
diff --git a/contrib/backup/multicloud/testdata/multi-cloud.yaml b/contrib/backup/multicloud/testdata/multi-cloud.yaml
deleted file mode 100644
index f89b0a490..000000000
--- a/contrib/backup/multicloud/testdata/multi-cloud.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-"Endpoint": "http://127.0.0.1:8088"
-# upload object timeout in seconds
-"UploadTimeout": 30
-AuthOptions:
- Strategy: "keystone"
- AuthUrl: "http://127.0.0.1/identity"
- DomainName: "Default"
- UserName: "admin"
- Password: "opensds@123"
- TenantName: "admin"
- # Whether to encrypt the password. If enabled, the value of the password must be ciphertext.
- EnableEncrypted: false
- # Encryption and decryption tool. Default value is aes. The decryption tool can only decrypt the corresponding ciphertext.
- PwdEncrypter: "aes"
\ No newline at end of file
diff --git a/contrib/connector/common.go b/contrib/connector/common.go
deleted file mode 100644
index 26e7402bb..000000000
--- a/contrib/connector/common.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package connector
-
-import (
- "fmt"
- "log"
- "net"
- "os/exec"
- "strings"
-)
-
-// ExecCmd Log and convert the result of exec.Command
-func ExecCmd(name string, arg ...string) (string, error) {
- log.Printf("Command: %s %s:\n", name, strings.Join(arg, " "))
- info, err := exec.Command(name, arg...).CombinedOutput()
- return string(info), err
-}
-
-// GetFSType returns the File System Type of device
-func GetFSType(device string) (string, error) {
- log.Printf("GetFSType: %s\n", device)
-
- var fsType string
- blkidCmd := "blkid"
- out, err := ExecCmd("blkid", device)
- if err != nil {
- log.Printf("failed to GetFSType: %v cmd: %s output: %s\n",
- err, blkidCmd, string(out))
- return fsType, nil
- }
-
- for _, v := range strings.Split(string(out), " ") {
- if strings.Contains(v, "TYPE=") {
- fsType = strings.Split(v, "=")[1]
- fsType = strings.Replace(fsType, "\"", "", -1)
- fsType = strings.Replace(fsType, "\n", "", -1)
- fsType = strings.Replace(fsType, "\r", "", -1)
- return fsType, nil
- }
- }
-
- return fsType, nil
-}
-
-// Format device by File System Type
-func Format(device string, fsType string) error {
- log.Printf("Format device: %s fstype: %s\n", device, fsType)
-
- mkfsCmd := fmt.Sprintf("mkfs.%s", fsType)
-
- _, err := exec.LookPath(mkfsCmd)
- if err != nil {
- if err == exec.ErrNotFound {
- return fmt.Errorf("%q executable not found in $PATH", mkfsCmd)
- }
- return err
- }
-
- mkfsArgs := []string{}
- mkfsArgs = append(mkfsArgs, device)
- if fsType == "ext4" || fsType == "ext3" {
- mkfsArgs = []string{"-F", device}
- }
-
- out, err := ExecCmd(mkfsCmd, mkfsArgs...)
- if err != nil {
- return fmt.Errorf("formatting disk failed: %v cmd: '%s %s' output: %q",
- err, mkfsCmd, strings.Join(mkfsArgs, " "), string(out))
- }
-
- return nil
-}
-
-// Mount device into mount point
-func Mount(device, mountpoint, fsType string, mountFlags []string) error {
- log.Printf("Mount device: %s mountpoint: %s, fsType: %s, mountFlags: %v\n", device, mountpoint, fsType, mountFlags)
-
- _, err := ExecCmd("mkdir", "-p", mountpoint)
- if err != nil {
- log.Printf("failed to mkdir: %v\n", err)
- return err
- }
-
- mountArgs := []string{}
-
- mountArgs = append(mountArgs, "-t", fsType)
-
- if len(mountFlags) > 0 {
- mountArgs = append(mountArgs, "-o", strings.Join(mountFlags, ","))
- }
-
- mountArgs = append(mountArgs, device)
- mountArgs = append(mountArgs, mountpoint)
-
- _, err = exec.Command("mount", mountArgs...).CombinedOutput()
- if err != nil {
- log.Printf("failed to mount: %v\n", err)
- return err
- }
-
- return nil
-}
-
-// Umount from mountpoint
-func Umount(mountpoint string) error {
- log.Printf("Umount mountpoint: %s\n", mountpoint)
-
- _, err := ExecCmd("umount", mountpoint)
- if err != nil {
- log.Printf("failed to Umount: %v\n", err)
- return err
- }
-
- return nil
-}
-
-// GetHostIP return Host IP
-func GetHostIP() string {
- addrs, err := net.InterfaceAddrs()
- if err != nil {
- return "127.0.0.1"
- }
-
- for _, address := range addrs {
- if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
- return ipnet.IP.String()
- }
- }
-
- return "127.0.0.1"
-}
-
-// GetHostName ...
-func GetHostName() (string, error) {
- hostName, err := ExecCmd("hostname")
- if err != nil {
- log.Printf("failed to get host name: %v\n", err)
- return "", err
- }
- hostName = strings.Replace(hostName, "\n", "", -1)
- return hostName, nil
-}
-
-// IsMounted ...
-func IsMounted(target string) (bool, error) {
- findmntCmd := "findmnt"
- _, err := exec.LookPath(findmntCmd)
- if err != nil {
- if err == exec.ErrNotFound {
- msg := fmt.Sprintf("%s executable not found in $PATH, err: %v\n", findmntCmd, err)
- log.Printf(msg)
- return false, fmt.Errorf(msg)
- }
- log.Printf("failed to check IsMounted %v\n", err)
- return false, err
- }
-
- findmntArgs := []string{"--target", target}
-
- log.Printf("findmnt args is %s\n", findmntArgs)
-
- out, err := ExecCmd(findmntCmd, findmntArgs...)
- if err != nil {
- // findmnt exits with non zero exit status if it couldn't find anything
- if strings.TrimSpace(string(out)) == "" {
- return false, nil
- }
-
- errIsMounted := fmt.Errorf("checking mounted failed: %v cmd: %s output: %s",
- err, findmntCmd, string(out))
-
- log.Printf("checking mounted failed: %v\n", errIsMounted)
- return false, errIsMounted
- }
-
- log.Printf("checking mounted result is %s\n", strings.TrimSpace(string(out)))
- if strings.TrimSpace(string(out)) == "" {
- return false, nil
- }
-
- line := strings.Split(string(out), "\n")
-
- if strings.Split(line[1], " ")[0] != target {
- return false, nil
- }
-
- return true, nil
-}
diff --git a/contrib/connector/connector.go b/contrib/connector/connector.go
deleted file mode 100755
index 0ae5b7267..000000000
--- a/contrib/connector/connector.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package connector
-
-import (
- "fmt"
- "log"
-
- "github.com/opensds/opensds/contrib/drivers/utils/config"
-)
-
-const (
- FcDriver = config.FCProtocol
- PortName = "port_name"
- NodeName = "node_name"
- Wwpn = "wwpn"
- Wwnn = "wwnn"
-
- IscsiDriver = config.ISCSIProtocol
- Iqn = "iqn"
-
- RbdDriver = config.RBDProtocol
-
- NvmeofDriver = config.NVMEOFProtocol
- Nqn = "nqn"
- NFSDriver = config.NFSProtocol
-)
-
-// Connector implementation
-type Connector interface {
- Attach(map[string]interface{}) (string, error)
- Detach(map[string]interface{}) error
- GetInitiatorInfo() ([]string, error)
-}
-
-var cnts = map[string]Connector{}
-
-// NewConnector implementation
-func NewConnector(cType string) Connector {
- if cnt, exist := cnts[cType]; exist {
- return cnt
- }
-
- log.Printf("%s is not registered to connector", cType)
- return nil
-}
-
-// RegisterConnector implementation
-func RegisterConnector(cType string, cnt Connector) error {
- if _, exist := cnts[cType]; exist {
- return fmt.Errorf("Connector %s already exist", cType)
- }
-
- cnts[cType] = cnt
- return nil
-}
-
-// UnregisterConnector implementation
-func UnregisterConnector(cType string) {
- if _, exist := cnts[cType]; !exist {
- return
- }
-
- delete(cnts, cType)
- return
-}
diff --git a/contrib/connector/fc/fc.go b/contrib/connector/fc/fc.go
deleted file mode 100755
index 6ac1495c6..000000000
--- a/contrib/connector/fc/fc.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fc
-
-import (
- "github.com/opensds/opensds/contrib/connector"
-)
-
-// FC struct
-type FC struct{}
-
-// init ...
-func init() {
- connector.RegisterConnector(connector.FcDriver, &FC{})
-}
-
-// Attach ...
-func (f *FC) Attach(conn map[string]interface{}) (string, error) {
- deviceInfo, err := connectVolume(conn)
- if err != nil {
- return "", err
- }
- return deviceInfo["path"], nil
-}
-
-// Detach ...
-func (f *FC) Detach(conn map[string]interface{}) error {
- return disconnectVolume(conn)
-}
-
-// GetInitiatorInfo ...
-func (f *FC) GetInitiatorInfo() ([]string, error) {
- return getInitiatorInfo()
-}
diff --git a/contrib/connector/fc/fibreChannel.go b/contrib/connector/fc/fibreChannel.go
deleted file mode 100755
index 5ac8e5d5c..000000000
--- a/contrib/connector/fc/fibreChannel.go
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fc
-
-import (
- "errors"
- "fmt"
- "log"
- "strconv"
- "strings"
- "time"
-
- "github.com/mitchellh/mapstructure"
- "github.com/opensds/opensds/contrib/connector"
-)
-
-var (
- tries = 3
-)
-
-// ConnectorInfo define
-type ConnectorInfo struct {
- AccessMode string `mapstructure:"accessMode"`
- AuthUser string `mapstructure:"authUserName"`
- AuthPass string `mapstructure:"authPassword"`
- AuthMethod string `mapstructure:"authMethod"`
- TgtDisco bool `mapstructure:"targetDiscovered"`
- TargetWWNs []string `mapstructure:"targetWWNs"`
- VolumeID string `mapstructure:"volumeId"`
- TgtLun int `mapstructure:"targetLun"`
- Encrypted bool `mapstructure:"encrypted"`
-}
-
-// ParseIscsiConnectInfo decode
-func parseFCConnectInfo(connectInfo map[string]interface{}) (*ConnectorInfo, error) {
- var con ConnectorInfo
- mapstructure.Decode(connectInfo, &con)
-
- if len(con.TargetWWNs) == 0 || con.TgtLun == 0 {
- return nil, errors.New("fibrechannel connection data invalid.")
- }
-
- return &con, nil
-}
-
-func connectVolume(connMap map[string]interface{}) (map[string]string, error) {
- conn, err := parseFCConnectInfo(connMap)
- if err != nil {
- return nil, err
- }
- hbas, err := getFChbasInfo()
- if err != nil {
- return nil, err
- }
- volPaths := getVolumePaths(conn, hbas)
- if len(volPaths) == 0 {
- errMsg := fmt.Sprintf("No FC devices found.\n")
- log.Printf(errMsg)
- return nil, errors.New(errMsg)
- }
-
- devicePath, deviceName := volPathDiscovery(volPaths, tries, conn.TargetWWNs, hbas)
- if devicePath != "" && deviceName != "" {
- log.Printf("Found Fibre Channel volume name, devicePath is %s, deviceName is %s\n", devicePath, deviceName)
- }
-
- deviceWWN, err := getSCSIWWN(devicePath)
- if err != nil {
- return nil, err
- }
-
- dmPath, err := getMultipathDevice(deviceWWN)
- if err != nil {
- return nil, err
- }
- if len(dmPath) > 0 {
- devicePath = dmPath
- }
-
- return map[string]string{"scsi_wwn": deviceWWN, "path": devicePath}, nil
-}
-
-func getVolumePaths(conn *ConnectorInfo, hbas []map[string]string) []string {
- wwnports := conn.TargetWWNs
- devices := getDevices(hbas, wwnports)
- lun := conn.TgtLun
- hostPaths := getHostDevices(devices, lun)
- return hostPaths
-}
-
-func volPathDiscovery(volPaths []string, tries int, tgtWWN []string, hbas []map[string]string) (string, string) {
- for i := 0; i < tries; i++ {
- for _, path := range volPaths {
- if pathExists(path) {
- deviceName := getContentfromSymboliclink(path)
- return path, deviceName
- }
- rescanHosts(tgtWWN, hbas)
- }
-
- time.Sleep(2 * time.Second)
- }
- return "", ""
-}
-
-func getHostDevices(devices []map[string]string, lun int) []string {
- var hostDevices []string
- for _, device := range devices {
- var hostDevice string
- for pciNum, tgtWWN := range device {
- hostDevice = fmt.Sprintf("/dev/disk/by-path/pci-%s-fc-%s-lun-%s", pciNum, tgtWWN, processLunID(lun))
- }
- hostDevices = append(hostDevices, hostDevice)
- }
- return hostDevices
-}
-
-func disconnectVolume(connMap map[string]interface{}) error {
- conn, err := parseFCConnectInfo(connMap)
- if err != nil {
- return err
- }
- volPaths, err := getVolumePathsForDetach(conn)
- if err != nil {
- return err
- }
-
- var devices []map[string]string
- for _, path := range volPaths {
- realPath := getContentfromSymboliclink(path)
- deviceInfo, _ := getDeviceInfo(realPath)
- devices = append(devices, deviceInfo)
- }
-
- return removeDevices(devices)
-}
-
-func removeDevices(devices []map[string]string) error {
- for _, device := range devices {
- path := fmt.Sprintf("/sys/block/%s/device/delete", strings.Replace(device["device"], "/dev/", "", -1))
- if pathExists(path) {
- if err := flushDeviceIO(device["device"]); err != nil {
- return err
- }
-
- if err := removeSCSIDevice(path); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func getPciNum(hba map[string]string) string {
- for k, v := range hba {
- if k == "device_path" {
- path := strings.Split(v, "/")
- for idx, u := range path {
- if strings.Contains(u, "net") || strings.Contains(u, "host") {
- return path[idx-1]
- }
- }
- }
- }
- return ""
-}
-
-func getVolumePathsForDetach(conn *ConnectorInfo) ([]string, error) {
- var volPaths []string
- hbas, err := getFChbasInfo()
- if err != nil {
- return nil, err
- }
-
- devicePaths := getVolumePaths(conn, hbas)
- for _, path := range devicePaths {
- if pathExists(path) {
- volPaths = append(volPaths, path)
- }
- }
- return volPaths, nil
-}
-
-func getDevices(hbas []map[string]string, wwnports []string) []map[string]string {
- var device []map[string]string
- for _, hba := range hbas {
- pciNum := getPciNum(hba)
- if pciNum != "" {
- for _, wwn := range wwnports {
- tgtWWN := map[string]string{pciNum: "0x" + wwn}
- device = append(device, tgtWWN)
- }
- }
- }
- return device
-}
-
-func processLunID(lunID int) string {
- if lunID < 256 {
- return strconv.Itoa(lunID)
- }
- return fmt.Sprintf("0x%04x%04x00000000", lunID&0xffff, lunID>>16&0xffff)
-}
-
-func getFChbasInfo() ([]map[string]string, error) {
- // Get Fibre Channel WWNs and device paths from the system.
- hbas, err := getFChbas()
- if err != nil {
- return nil, err
- }
- var hbasInfos []map[string]string
- for _, hba := range hbas {
- wwpn := strings.Replace(hba["port_name"], "0x", "", -1)
- wwnn := strings.Replace(hba["node_name"], "0x", "", -1)
- devicePath := hba["ClassDevicepath"]
- device := hba["ClassDevice"]
-
- hbasInfo := map[string]string{"port_name": wwpn, "node_name": wwnn, "host_device": device, "device_path": devicePath}
-
- hbasInfos = append(hbasInfos, hbasInfo)
- }
-
- return hbasInfos, nil
-}
-
-func getInitiatorInfo() ([]string, error) {
- hbas, err := getFChbasInfo()
- if err != nil {
- return nil, err
- }
-
- var initiatorInfo []string
- for _, hba := range hbas {
- if v, ok := hba[connector.PortName]; ok {
- initiatorInfo = append(initiatorInfo, v)
- }
- }
-
- //Check for atleast one initiator
- if (0 == len(initiatorInfo)){
- return nil, errors.New("No initiator info found.")
- }
-
- return initiatorInfo, nil
-}
diff --git a/contrib/connector/fc/linuxfc.go b/contrib/connector/fc/linuxfc.go
deleted file mode 100644
index 6e95874e5..000000000
--- a/contrib/connector/fc/linuxfc.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fc
-
-import (
- "errors"
- "fmt"
- "log"
- "os"
- "strings"
-
- "github.com/opensds/opensds/contrib/connector"
-)
-
-func getSCSIWWN(devicePath string) (string, error) {
- out, err := connector.ExecCmd("/lib/udev/scsi_id", "--page", "0x83", "--whitelisted", devicePath)
- if err != nil {
- errMsg := fmt.Sprintf("Error occurred when get device wwn: %s, %v\n", out, err)
- log.Printf(errMsg)
- return "", errors.New(errMsg)
- }
- return strings.TrimSpace(out), nil
-}
-
-func getContentfromSymboliclink(symboliclink string) string {
- out, _ := connector.ExecCmd("readlink", "-f", symboliclink)
- return strings.TrimSuffix(out, "\n")
-}
-
-func rescanHosts(tgtWWN []string, hbas []map[string]string) error {
- for _, hba := range hbas {
- cmd := fmt.Sprintf("echo \"- - -\" > /sys/class/scsi_host/%s/scan", hba["host_device"])
- out, err := connector.ExecCmd("/bin/bash", "-c", cmd)
- if err != nil {
- errMsg := fmt.Sprintf("Error occurred when rescan hosts: %s, %v\n", out, err)
- log.Printf(errMsg)
- return errors.New(errMsg)
- }
- }
- return nil
-}
-
-func getFChbas() ([]map[string]string, error) {
- if !fcSupport() {
- errMsg := fmt.Sprintf("No Fibre Channel support detected.\n")
- log.Printf(errMsg)
- return nil, errors.New(errMsg)
- }
-
- out, err := connector.ExecCmd("systool", "-c", "fc_host", "-v")
- if err != nil {
- errMsg := fmt.Sprintf("Error occurred when get FC hbas info: systool is not installed: %s, %v\n", out, err)
- log.Printf(errMsg)
- return nil, errors.New(errMsg)
- }
-
- if out == "" {
- errMsg := fmt.Sprintf("No Fibre Channel support detected.\n")
- log.Printf(errMsg)
- return nil, errors.New(errMsg)
- }
-
- lines := strings.Split(out, "\n")
- lines = lines[2:]
- hba := make(map[string]string)
- hbas := []map[string]string{}
- lastline := ""
-
- for _, line := range lines {
- line = strings.TrimSpace(line)
- // 2 newlines denotes a new hba port
- if line == "" && lastline == "" {
- if len(hba) > 0 {
- hbas = append(hbas, hba)
- hba = make(map[string]string)
- }
- } else {
- val := strings.Split(line, "=")
- if len(val) == 2 {
- key := strings.Replace(val[0], " ", "", -1)
- key = trimDoubleQuotesInText(key)
-
- val := strings.Replace(val[1], " ", "", -1)
- val = trimDoubleQuotesInText(val)
-
- hba[key] = val
- }
- }
- lastline = line
- }
- return hbas, nil
-}
-
-func removeSCSIDevice(path string) error {
- cmd := "echo 1 >" + path
- out, err := connector.ExecCmd("/bin/bash", "-c", cmd)
- if err != nil {
- errMsg := fmt.Sprintf("Error occurred when remove scsi device: %s, %v\n", out, err)
- log.Printf(errMsg)
- return errors.New(errMsg)
- }
- return nil
-}
-
-func flushDeviceIO(device string) error {
- cmd := "blockdev --flushbufs " + device
- out, err := connector.ExecCmd("/bin/bash", "-c", cmd)
- if err != nil {
- errMsg := fmt.Sprintf("Error occurred when get device info when detach volume: %s, %v\n", out, err)
- log.Printf(errMsg)
- return errors.New(errMsg)
- }
- return nil
-}
-
-func getDeviceInfo(devicePath string) (map[string]string, error) {
- cmd := "sg_scan " + devicePath
- out, err := connector.ExecCmd("/bin/bash", "-c", cmd)
- if err != nil {
- errMsg := fmt.Sprintf("Error occurred when get device info: %s, %v\n", out, err)
- log.Printf(errMsg)
- return nil, errors.New(errMsg)
- }
-
- devInfo := make(map[string]string)
- devInfo["device"] = devicePath
-
- line := strings.TrimSpace(out)
-
- info := strings.Split(line, " ")
-
- for _, v := range info {
- if strings.Contains(v, "=") {
- pair := strings.Split(v, "=")
- devInfo[pair[0]] = pair[1]
- }
- if strings.Contains(v, "scsi") {
- devInfo["host"] = strings.Replace(v, "scsi", "", -1)
- }
- }
-
- return devInfo, nil
-}
-
-func fcSupport() bool {
- var FcHostSYSFcPATH = "/sys/class/fc_host"
- return pathExists(FcHostSYSFcPATH)
-}
-
-func pathExists(path string) bool {
- _, err := os.Stat(path)
- if err == nil {
- return true
- }
- if os.IsNotExist(err) {
- return false
- }
- return false
-}
-
-func trimDoubleQuotesInText(str string) string {
- if strings.HasPrefix(str, "\"") && strings.HasSuffix(str, "\"") {
- return str[1 : len(str)-1]
- }
- return str
-}
-
-func getMultipathDevice(deviceWWN string) (string, error) {
- cmd := fmt.Sprintf("ls -l /dev/disk/by-id/ | grep %s", deviceWWN)
- out, err := connector.ExecCmd("/bin/bash", "-c", cmd)
- if err != nil {
- msg := fmt.Sprintf("No DM of wwn %s exist", deviceWWN)
- log.Println(msg)
- return "", nil
- }
-
- lines := strings.Split(strings.TrimSpace(out), "\n")
- for _, line := range lines {
- splits := strings.Split(line, "../../")
- if len(splits) == 2 {
- name := splits[1]
- if strings.HasPrefix(name, "dm") {
- return fmt.Sprintf("/dev/%s", name), nil
- }
- }
- }
-
- msg := fmt.Sprintf("No DM of wwn %s exist", deviceWWN)
- log.Println(msg)
- return "", nil
-}
diff --git a/contrib/connector/iscsi/helper.go b/contrib/connector/iscsi/helper.go
deleted file mode 100755
index 6eb409af9..000000000
--- a/contrib/connector/iscsi/helper.go
+++ /dev/null
@@ -1,390 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package iscsi
-
-import (
- "errors"
- "fmt"
- "log"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "time"
-
- "github.com/mitchellh/mapstructure"
- "github.com/opensds/opensds/contrib/connector"
-)
-
-// IscsiConnectorInfo define
-type IscsiConnectorInfo struct {
- AccessMode string `mapstructure:"accessMode"`
- AuthUser string `mapstructure:"authUserName"`
- AuthPass string `mapstructure:"authPassword"`
- AuthMethod string `mapstructure:"authMethod"`
- TgtDisco bool `mapstructure:"targetDiscovered"`
- TgtIQN []string `mapstructure:"targetIQN"`
- TgtPortal []string `mapstructure:"targetPortal"`
- VolumeID string `mapstructure:"volumeId"`
- TgtLun int `mapstructure:"targetLun"`
- Encrypted bool `mapstructure:"encrypted"`
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Refer some codes from: https://github.com/j-griffith/csi-cinder //
-// Refer some codes from: https://github.com/kubernetes/kubernetes //
-////////////////////////////////////////////////////////////////////////////////
-
-const (
- //ISCSITranslateTCP tcp
- ISCSITranslateTCP = "tcp"
-)
-
-// statFunc define
-type statFunc func(string) (os.FileInfo, error)
-
-// globFunc define
-type globFunc func(string) ([]string, error)
-
-// waitForPathToExist scan the device path
-func waitForPathToExist(devicePath *string, maxRetries int, deviceTransport string) bool {
- // This makes unit testing a lot easier
- return waitForPathToExistInternal(devicePath, maxRetries, deviceTransport, os.Stat, filepath.Glob)
-}
-
-// waitForPathToExistInternal scan the device path
-func waitForPathToExistInternal(devicePath *string, maxRetries int, deviceTransport string, osStat statFunc, filepathGlob globFunc) bool {
- if devicePath == nil {
- return false
- }
-
- for i := 0; i < maxRetries; i++ {
- var err error
- if deviceTransport == ISCSITranslateTCP {
- _, err = osStat(*devicePath)
- } else {
- fpath, _ := filepathGlob(*devicePath)
- if fpath == nil {
- err = os.ErrNotExist
- } else {
- // There might be a case that fpath contains multiple device paths if
- // multiple PCI devices connect to same iscsi target. We handle this
- // case at subsequent logic. Pick up only first path here.
- *devicePath = fpath[0]
- }
- }
- if err == nil {
- return true
- }
- if !os.IsNotExist(err) {
- return false
- }
- if i == maxRetries-1 {
- break
- }
- time.Sleep(time.Second)
- }
- return false
-}
-
-// GetInitiator returns all the ISCSI Initiator Name
-func getInitiator() ([]string, error) {
- res, err := connector.ExecCmd("cat", "/etc/iscsi/initiatorname.iscsi")
- iqns := []string{}
- if err != nil {
- log.Printf("Error encountered gathering initiator names: %v\n", err)
- return iqns, nil
- }
-
- lines := strings.Split(string(res), "\n")
- for _, l := range lines {
- if strings.Contains(l, "InitiatorName=") {
- iqns = append(iqns, strings.Split(l, "=")[1])
- }
- }
-
- log.Printf("Found the following iqns: %s\n", iqns)
- return iqns, nil
-}
-
-// Discovery ISCSI Target
-func discovery(portal string) (string, error) {
- log.Printf("Discovery portal: %s\n", portal)
- result, err := connector.ExecCmd("iscsiadm", "-m", "discovery", "-t", "sendtargets", "-p", portal)
- if err != nil {
- log.Printf("Error encountered in sendtargets: %v\n", err)
- return "", err
- }
- return strings.Replace(result, "\n", "", -1), nil
-}
-
-// Login ISCSI Target
-func setAuth(portal string, targetiqn string, name string, passwd string) error {
- // Set UserName
- info, err := connector.ExecCmd("iscsiadm", "-m", "node", "-p", portal, "-T", targetiqn,
- "--op=update", "--name", "node.session.auth.username", "--value", name)
- if err != nil {
- log.Printf("Received error on set income username: %v, %v\n", err, info)
- return err
- }
- // Set Password
- info, err = connector.ExecCmd("iscsiadm", "-m", "node", "-p", portal, "-T", targetiqn,
- "--op=update", "--name", "node.session.auth.password", "--value", passwd)
- if err != nil {
- log.Printf("Received error on set income password: %v, %v\n", err, info)
- return err
- }
- return nil
-}
-
-// Login ISCSI Target
-func login(portal string, targetiqn string) error {
- log.Printf("Login portal: %s targetiqn: %s\n", portal, targetiqn)
- // Do not login again if there is an active session.
- cmd := "iscsiadm -m session |grep -w " + portal + "|grep -w " + targetiqn
- _, err := connector.ExecCmd("/bin/bash", "-c", cmd)
- if err == nil {
- log.Printf("there is an active session\n")
- _, err := connector.ExecCmd("iscsiadm", "-m", "session", "-R")
- if err == nil {
- log.Printf("rescan iscsi session success.\n")
- }
- return nil
- }
-
- info, err := connector.ExecCmd("iscsiadm", "-m", "node", "-p", portal, "-T", targetiqn, "--login")
- if err != nil {
- log.Printf("Received error on login attempt: %v, %s\n", err, info)
- return err
- }
- return nil
-}
-
-// Logout ISCSI Target
-func logout(portal string, targetiqn string) error {
- log.Printf("Logout portal: %s targetiqn: %s\n", portal, targetiqn)
- info, err := connector.ExecCmd("iscsiadm", "-m", "node", "-p", portal, "-T", targetiqn, "--logout")
- if err != nil {
- log.Println("Received error on logout attempt", err, info)
- return err
- }
- return nil
-}
-
-// Delete ISCSI Node
-func delete(targetiqn string) (err error) {
- log.Printf("Delete targetiqn: %s\n", targetiqn)
- _, err = connector.ExecCmd("iscsiadm", "-m", "node", "-o", "delete", "-T", targetiqn)
- if err != nil {
- log.Printf("Received error on Delete attempt: %v\n", err)
- return err
- }
- return nil
-}
-
-// ParseIscsiConnectInfo decode
-func parseIscsiConnectInfo(connectInfo map[string]interface{}) (*IscsiConnectorInfo, int, error) {
- var con IscsiConnectorInfo
- mapstructure.Decode(connectInfo, &con)
-
- fmt.Printf("iscsi target portal: %s, target iqn: %s, target lun: %d\n", con.TgtPortal, con.TgtIQN, con.TgtLun)
- if len(con.TgtPortal) == 0 || con.TgtLun == 0 {
- return nil, -1, errors.New("iscsi connection data invalid.")
- }
-
- var index int
-
- log.Printf("TgtPortal:%v\n", con.TgtPortal)
- for i, portal := range con.TgtPortal {
- strs := strings.Split(portal, ":")
- ip := strs[0]
- cmd := "ping -c 2 " + ip
- res, err := connector.ExecCmd("/bin/bash", "-c", cmd)
- log.Printf("ping result:%v\n", res)
- if err != nil {
- log.Printf("ping error:%v\n", err)
- if i == len(con.TgtPortal)-1 {
- return nil, -1, errors.New("no available iscsi portal.")
- }
- continue
- }
- index = i
- break
- }
-
- return &con, index, nil
-}
-
-// Connect ISCSI Target
-func connect(connMap map[string]interface{}) (string, error) {
- conn, index, err := parseIscsiConnectInfo(connMap)
- if err != nil {
- return "", err
- }
- log.Println("connmap info: ", connMap)
- log.Println("conn info is: ", conn)
- portal := conn.TgtPortal[index]
-
- var targetiqn string
- var targetiqnIdx = 1
- if len(conn.TgtIQN) == 0 {
- content, _ := discovery(portal)
- targetiqn = strings.Split(content, " ")[targetiqnIdx]
- } else {
- targetiqn = conn.TgtIQN[index]
- }
-
- targetlun := strconv.Itoa(conn.TgtLun)
-
- cmd := "ls -ali / | sed '2!d' |awk {'print $1'}"
- INODE_NUM, err := connector.ExecCmd("/bin/bash", "-c", cmd)
- if err != nil && INODE_NUM == "2" {
- cmd = "\"pgrep -f /sbin/iscsid\""
- _, err = connector.ExecCmd("/bin/bash", "-c", cmd)
-
- if err != nil {
- cmd = "/sbin/iscsid"
- _, errExec := connector.ExecCmd("/bin/bash", "-c", cmd)
- if errExec != nil {
- return "", fmt.Errorf("Please stop the iscsi process: %v", errExec)
- }
- }
- }
-
- log.Printf("Connect portal: %s targetiqn: %s targetlun: %s\n", portal, targetiqn, targetlun)
- devicePath := strings.Join([]string{
- "/dev/disk/by-path/ip",
- portal,
- "iscsi",
- targetiqn,
- "lun",
- targetlun}, "-")
-
- log.Println("devicepath is ", devicePath)
-
- // Discovery
- _, err = discovery(portal)
- if err != nil {
- return "", err
- }
- if len(conn.AuthMethod) != 0 {
- setAuth(portal, targetiqn, conn.AuthUser, conn.AuthPass)
- }
- //Login
- err = login(portal, targetiqn)
- if err != nil {
- return "", err
- }
-
- isexist := waitForPathToExist(&devicePath, 10, ISCSITranslateTCP)
-
- if !isexist {
- return "", errors.New("Could not connect volume: Timeout after 10s")
- }
-
- return devicePath, nil
-}
-
-// Disconnect ISCSI Target
-func disconnect(conn map[string]interface{}) error {
- iscsiCon, index, err := parseIscsiConnectInfo(conn)
- if err != nil {
- return err
- }
- portal := iscsiCon.TgtPortal[index]
-
- var targetiqn string
- if len(iscsiCon.TgtIQN) == 0 {
- content, _ := discovery(portal)
- targetiqn = strings.Split(content, " ")[1]
- } else {
- targetiqn = iscsiCon.TgtIQN[index]
- }
-
- cmd := "ls /dev/disk/by-path/ |grep -w " + portal + "|grep -w " + targetiqn + "|wc -l |awk '{if($1>1) print 1; else print 0}'"
- logoutFlag, err := connector.ExecCmd("/bin/bash", "-c", cmd)
- if err != nil {
- log.Printf("Disconnect iscsi target failed, %v\n", err)
- return err
- }
-
- logoutFlag = strings.Replace(logoutFlag, "\n", "", -1)
- if logoutFlag == "0" {
- log.Printf("Disconnect portal: %s targetiqn: %s\n", portal, targetiqn)
- // Logout
- err = logout(portal, targetiqn)
- if err != nil {
- return err
- }
-
- //Delete
- err = delete(targetiqn)
- if err != nil {
- return err
- }
-
- return nil
- }
- log.Println("logoutFlag: ", logoutFlag)
- return nil
-}
-
-func getTgtPortalAndTgtIQN() (string, string, error) {
- log.Println("GetTgtPortalAndTgtIQN")
- var targetiqn, targetportal string
- out, err := connector.ExecCmd("iscsiadm", "-m", "session")
- if err != nil {
- errGetPortalAndIQN := fmt.Errorf("Get targetportal And targetiqn failed: %v", err)
- log.Println("Get targetportal And targetiqn failed: ", errGetPortalAndIQN)
- return "", "", errGetPortalAndIQN
- }
-
- lines := strings.Split(string(out), "\n")
-
- for _, line := range lines {
- if strings.Contains(line, "tcp") {
- lineSplit := strings.Split(line, " ")
- targetportalTemp := lineSplit[2]
- targetportal = strings.Split(targetportalTemp, ",")[0]
- targetiqn = lineSplit[3]
- }
- }
-
- if targetiqn != "" && targetportal != "" {
- return targetiqn, targetportal, nil
- }
-
- msg := "targetportal And targetiqn not found"
- log.Println(msg)
- return "", "", errors.New(msg)
-
-}
-
-func getInitiatorInfo() ([]string, error) {
- initiators, err := getInitiator()
- if err != nil {
- return nil, err
- }
-
- if len(initiators) == 0 {
- return nil, errors.New("No iqn found")
- }
-
- if len(initiators) > 1 {
- return nil, errors.New("the number of iqn is wrong")
- }
-
- return initiators, nil
-}
diff --git a/contrib/connector/iscsi/iscsi.go b/contrib/connector/iscsi/iscsi.go
deleted file mode 100755
index e60910bbc..000000000
--- a/contrib/connector/iscsi/iscsi.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package iscsi
-
-import (
- "github.com/opensds/opensds/contrib/connector"
-)
-
-type Iscsi struct{}
-
-func init() {
- connector.RegisterConnector(connector.IscsiDriver, &Iscsi{})
-}
-
-func (isc *Iscsi) Attach(conn map[string]interface{}) (string, error) {
- return connect(conn)
-}
-
-func (isc *Iscsi) Detach(conn map[string]interface{}) error {
- return disconnect(conn)
-}
-
-// GetInitiatorInfo implementation
-func (isc *Iscsi) GetInitiatorInfo() ([]string, error) {
- return getInitiatorInfo()
-}
diff --git a/contrib/connector/nfs/helper.go b/contrib/connector/nfs/helper.go
deleted file mode 100755
index 718759b6d..000000000
--- a/contrib/connector/nfs/helper.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nfs
-
-import (
- "errors"
- "os/exec"
- "strings"
-
- "github.com/mitchellh/mapstructure"
- "github.com/opensds/opensds/contrib/connector"
-)
-
-import (
- "fmt"
-)
-
-type NFSConnectorInfo struct {
- ExportLocations []string `mapstructure:"exportLocations"`
-}
-
-func connect(conn map[string]interface{}) (string, error) {
- exportLocation, err := parseNFSConnectInfo(conn)
- if err != nil {
- return "", err
- }
-
- ipAddr := strings.Split(exportLocation, ":")[0]
- sharePath := strings.Split(exportLocation, ":")[1]
-
- fmt.Printf("export locations: %v\n", exportLocation)
-
- showMountCommand := "showmount"
- _, err = exec.LookPath(showMountCommand)
- if err != nil {
- if err == exec.ErrNotFound {
- return "", fmt.Errorf("%q executable not found in $PATH", showMountCommand)
- }
- return "", err
- }
-
- cmd := fmt.Sprintf("%s -e %s", showMountCommand, ipAddr)
- res, err := connector.ExecCmd("/bin/bash", "-c", cmd)
- if err != nil {
- return "", err
- }
-
- for _, line := range strings.Split(res, "\n") {
- if strings.Contains(line, sharePath) {
- str := strings.TrimSpace(line)
- strArray := strings.Split(str, " ")
-
- fileShareNameIdx := 0
- if strArray[fileShareNameIdx] == sharePath {
- return exportLocation, nil
- }
- }
- }
-
- return "", fmt.Errorf("cannot find fileshare path: %s", sharePath)
-}
-
-// ParseIscsiConnectInfo decode
-func parseNFSConnectInfo(connectInfo map[string]interface{}) (string, error) {
- var con NFSConnectorInfo
- mapstructure.Decode(connectInfo, &con)
-
- fmt.Printf("connection data : %v\n", con)
- if len(con.ExportLocations) == 0 {
- return "", errors.New("nfs connection data is invalid")
- }
-
- for _, lo := range con.ExportLocations {
- strs := strings.Split(lo, ":")
- ipIdx := 0
- ip := strs[ipIdx]
-
- cmd := "ping -c 2 " + ip
- _, err := connector.ExecCmd("/bin/bash", "-c", cmd)
- if err != nil {
- fmt.Printf("ping error: %v\n", err)
- } else {
- return lo, nil
- }
- }
-
- return "", errors.New("no valid export location can be found")
-}
-
-func disconnect(conn map[string]interface{}) error {
- return errors.New("disconnect method of nfs is not implemented")
-}
-
-func getInitiatorInfo() ([]string, error) {
- return nil, errors.New("get initiator information method of nfs is not implemented")
-}
diff --git a/contrib/connector/nfs/nfs.go b/contrib/connector/nfs/nfs.go
deleted file mode 100755
index 89f516c9a..000000000
--- a/contrib/connector/nfs/nfs.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nfs
-
-import (
- "github.com/opensds/opensds/contrib/connector"
-)
-
-type NFS struct{}
-
-func init() {
- connector.RegisterConnector(connector.NFSDriver, &NFS{})
-}
-
-func (n *NFS) Attach(conn map[string]interface{}) (string, error) {
- return connect(conn)
-}
-
-func (n *NFS) Detach(conn map[string]interface{}) error {
- return disconnect(conn)
-}
-
-// GetInitiatorInfo implementation
-func (n *NFS) GetInitiatorInfo() ([]string, error) {
- return getInitiatorInfo()
-}
diff --git a/contrib/connector/nvmeof/nvmeof.go b/contrib/connector/nvmeof/nvmeof.go
deleted file mode 100755
index 22b95741b..000000000
--- a/contrib/connector/nvmeof/nvmeof.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nvmeof
-
-import (
- "github.com/opensds/opensds/contrib/connector"
-)
-
-type Nvmeof struct{}
-
-func init() {
- connector.RegisterConnector(connector.NvmeofDriver, &Nvmeof{})
- connector.ExecCmd("modprobe", "nvme-rdma")
-}
-
-func (nof *Nvmeof) Attach(conn map[string]interface{}) (string, error) {
- return Connect(conn)
-}
-
-func (nof *Nvmeof) Detach(conn map[string]interface{}) error {
- NvmeofCon := ParseNvmeofConnectInfo(conn)
- return DisConnect(NvmeofCon.Nqn)
-}
-
-// GetInitiatorInfo implementation
-func (nof *Nvmeof) GetInitiatorInfo() ([]string, error) {
- return getInitiatorInfo()
-}
diff --git a/contrib/connector/nvmeof/nvmeof_helper.go b/contrib/connector/nvmeof/nvmeof_helper.go
deleted file mode 100755
index 037bde1f2..000000000
--- a/contrib/connector/nvmeof/nvmeof_helper.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nvmeof
-
-import (
- "errors"
- "log"
- "strings"
- "time"
-
- "github.com/mitchellh/mapstructure"
- "github.com/opensds/opensds/contrib/connector"
-)
-
-const (
- iniNvmePrefix = "nqn.ini."
-)
-
-// ConnectorInfo define
-type ConnectorInfo struct {
- Nqn string `mapstructure:"targetNQN"` //NVMe subsystem name to the volume to be connected
- TgtPort string `mapstructure:"targetPort"` //NVMe target port that hosts the nqn sybsystem
- TgtPortal string `mapstructure:"targetIP"` //NVMe target ip that hosts the nqn sybsystem
- TranType string `mapstructure:"transporType"` // Nvme transport type
- HostNqn string `mapstructure:"hostNqn"` // host nqn
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////
-// Refer some codes from: https://github.intel.com/yingxinc/cinder-rsd-os-brick //
-//////////////////////////////////////////////////////////////////////////////////////////
-
-// GetInitiator returns all the Nvmeof UUID
-func GetInitiator() ([]string, error) {
- res, err := connector.ExecCmd("dmidecode")
- nqns := []string{}
- if err != nil {
- log.Printf("Unable to execute dmidecode,Error encountered gathering Nvmeof UUID: %v\n", err)
- return nqns, nil
- }
-
- lines := strings.Split(string(res), "\n")
- for _, l := range lines {
- if strings.Contains(l, "UUID: ") {
- tmp := iniNvmePrefix + strings.Split(l, ": ")[1]
- nqns = append(nqns, tmp)
- log.Printf("Found the following nqns: %s", nqns)
- return nqns, nil
- }
- }
- log.Println("can not find any nqn initiator")
- return nqns, errors.New("can not find any nqn initiator")
-}
-
-func getInitiatorInfo() ([]string, error) {
-
- initiators, err := GetInitiator()
- if err != nil {
- return nil, err
- }
-
- if len(initiators) == 0 {
- return nil, errors.New("no nqn found")
- }
-
- if len(initiators) > 1 {
- return nil, errors.New("the number of nqn is wrong")
- }
-
- hostName, err := connector.GetHostName()
- if err != nil {
- return nil, errors.New("can not get hostname")
- }
-
- hostName = initiators[0] + "." + hostName
-
- initiator := make([]string, 1)
- initiator = append(initiator, hostName)
-
- return initiator, nil
-}
-
-// GetNvmeDevice get all the nvme devices
-func GetNvmeDevice() (map[string]int, error) {
- nvmeDevice := make(map[string]int)
- pattern := "/dev/nvme"
- Npath, err := connector.ExecCmd("nvme", "list")
- if err != nil {
- return nvmeDevice, err
- }
- log.Println("nvme list succeed")
- lines := strings.Split(string(Npath), "\n")
- for _, l := range lines {
- if strings.Contains(l, pattern) {
- name := strings.Split(l, " ")[0]
- nvmeDevice[name] = 1
- }
- }
- return nvmeDevice, err
-}
-
-// GetNvmeSubsystems :list connected target name
-func GetNvmeSubsystems() (map[string]int, error) {
- nqn := make(map[string]int)
- res, err := connector.ExecCmd("nvme", "list-subsys")
- if err != nil {
- return nqn, err
- }
-
- lines := strings.Split(string(res), "\n")
- for _, l := range lines {
- if strings.Contains(l, "NQN=") {
- name := strings.Split(l, "NQN=")[1]
- nqn[name] = 1
- }
- }
-
- log.Printf("Found the following NQN: %s", res)
- return nqn, nil
-}
-
-// Discovery NVMe-OF target
-func Discovery(connMap map[string]interface{}) error {
- conn := ParseNvmeofConnectInfo(connMap)
- targetip := conn.TgtPortal
- targetport := conn.TgtPort
- info, err := connector.ExecCmd("nvme", "discover", "-t", "tcp", "-a", targetip, "-s", targetport)
- if err != nil {
- log.Printf("Error encountered in send targets:%v, %v\n", err, info)
- return err
- }
- return nil
-}
-
-// Connect NVMe-OF Target ,return the new target device path in this node
-func Connect(connMap map[string]interface{}) (string, error) {
- CurrentNvmeDevice, _ := GetNvmeDevice()
- conn := ParseNvmeofConnectInfo(connMap)
- connNqn := conn.Nqn
- targetPortal := conn.TgtPortal
- port := conn.TgtPort
- nvmeTransportType := conn.TranType
- hostName := conn.HostNqn
-
- cmd := "nvme connect -t " + nvmeTransportType + " -n " + connNqn + " -s " + port + " -a " + targetPortal
- if hostName != "ALL" {
- cmd += " -q " + hostName
- }
- //log.Printf("conn information:%s, %s, %s ", connNqn, targetPortal, port)
-
- _, err := connector.ExecCmd("/bin/bash", "-c", cmd)
-
- if err != nil {
- log.Println("Failed to connect to NVMe nqn :", connNqn)
- return "", err
- }
-
- for retry := 0; retry < 10; retry++ {
- allNvmeDevices, _ := GetNvmeDevice()
- for p, _ := range allNvmeDevices {
- if _, ok := CurrentNvmeDevice[p]; !ok {
- log.Printf("NVMe device to be connected to is : %v", p)
- return p, nil
- }
- time.Sleep(time.Second)
- }
- }
- return "", errors.New("could not connect volume: Timeout after 10s")
-}
-
-// DisConnect nvme device by name
-func DisConnect(nqn string) error {
- currentNvmeNames, err := GetNvmeSubsystems()
- if err != nil {
- log.Println("can not get nvme device")
- return err
- }
- if _, ok := currentNvmeNames[nqn]; !ok {
- log.Println("Trying to disconnect nqn" + nqn +
- "is not connected.")
- return errors.New("device path not found ")
- }
-
- _, err = connector.ExecCmd("nvme", "disconnect", "-n", nqn)
- if err != nil {
- log.Println("could not disconnect nvme nqn : ", nqn)
- return err
- }
- log.Println(" disconnect nvme nqn : ", nqn)
- return nil
-}
-
-// ParseNvmeofConnectInfo decode
-func ParseNvmeofConnectInfo(connectInfo map[string]interface{}) *ConnectorInfo {
- var con ConnectorInfo
- mapstructure.Decode(connectInfo, &con)
- return &con
-}
diff --git a/contrib/connector/rbd/rbd.go b/contrib/connector/rbd/rbd.go
deleted file mode 100755
index f90922a83..000000000
--- a/contrib/connector/rbd/rbd.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rbd
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "reflect"
- "strings"
- "time"
-
- "github.com/opensds/opensds/contrib/connector"
-)
-
-var (
- rbdBusPath = "/sys/bus/rbd"
- rbdDevicePath = path.Join(rbdBusPath, "devices")
- rbdDev = "/dev/rbd"
-)
-
-type RBD struct{}
-
-var _ connector.Connector = &RBD{}
-
-func init() {
- connector.RegisterConnector(connector.RbdDriver, &RBD{})
-}
-
-func convertToStrList(in interface{}) ([]string, error) {
- var out []string
- switch in.(type) {
- case []string:
- out = in.([]string)
- case []interface{}:
- for _, v := range in.([]interface{}) {
- out = append(out, v.(string))
- }
- case string:
- out = append(out, in.(string))
- default:
- return out, fmt.Errorf("unsupported type: %v", reflect.TypeOf(in))
- }
- return out, nil
-}
-
-func (*RBD) Attach(conn map[string]interface{}) (string, error) {
- if _, ok := conn["name"]; !ok {
- return "", fmt.Errorf("cann't get name in connection")
- }
-
- name, ok := conn["name"].(string)
- if !ok {
- return "", fmt.Errorf("invalid connection name %v", conn["name"])
- }
-
- hosts, err := convertToStrList(conn["hosts"])
- if err != nil {
- return "", fmt.Errorf("invalid connection hosts %v: %v", conn["hosts"], err)
- }
-
- ports, err := convertToStrList(conn["ports"])
- if err != nil {
- return "", fmt.Errorf("invalid connection ports %v: %v", conn["ports"], err)
- }
-
- device, err := mapDevice(name, hosts, ports)
- if err != nil {
- return "", err
- }
-
- return device, nil
-}
-
-func (*RBD) Detach(conn map[string]interface{}) error {
- if _, ok := conn["name"]; !ok {
- return os.ErrInvalid
- }
- name, ok := conn["name"].(string)
- if !ok {
- return fmt.Errorf("invalid connection name %v", conn["name"])
- }
- device, err := findDevice(name, 1)
- if err != nil {
- return err
- }
-
- _, err = exec.Command("rbd", "unmap", device).CombinedOutput()
- return err
-}
-
-// GetInitiatorInfo implementation
-func (*RBD) GetInitiatorInfo() ([]string, error) {
-
- hostName, err := connector.GetHostName()
-
- if err != nil {
- return nil, err
- }
-
- initiator := make([]string, 1)
- initiator = append(initiator, hostName)
-
- return initiator, nil
-}
-
-func parseName(name string) (poolName, imageName, snapName string, err error) {
- fields := strings.Split(name, "/")
- if len(fields) != 2 {
- err = fmt.Errorf("invalid connection name %s", name)
- return
- }
- poolName, imageName, snapName = fields[0], fields[1], "-"
-
- imgAndSnap := strings.Split(fields[1], "@")
- if len(imgAndSnap) == 2 {
- imageName, snapName = imgAndSnap[0], imgAndSnap[1]
- }
- return
-}
-
-func mapDevice(name string, hosts, ports []string) (string, error) {
- devName, err := findDevice(name, 1)
- if err == nil {
- return devName, nil
- }
-
- // modprobe
- exec.Command("modprobe", "rbd").CombinedOutput()
-
- for i := 0; i < len(hosts); i++ {
- _, err = exec.Command("rbd", "map", name).CombinedOutput()
- if err == nil {
- break
- }
- }
-
- devName, err = findDevice(name, 10)
- if err != nil {
- return "", err
- }
-
- return devName, nil
-}
-
-func findDevice(name string, retries int) (string, error) {
- poolName, imageName, snapName, err := parseName(name)
- if err != nil {
- return "", err
- }
-
- for i := 0; i < retries; i++ {
- if name, err := findDeviceTree(poolName, imageName, snapName); err == nil {
- if _, err := os.Stat(rbdDev + name); err != nil {
- return "", err
- }
-
- return rbdDev + name, nil
- }
-
- time.Sleep(time.Second)
- }
-
- return "", os.ErrNotExist
-}
-
-func findDeviceTree(poolName, imageName, snapName string) (string, error) {
- fi, err := ioutil.ReadDir(rbdDevicePath)
- if err != nil && err != os.ErrNotExist {
- return "", err
- } else if err == os.ErrNotExist {
- return "", fmt.Errorf("Could not locate devices directory")
- }
-
- for _, f := range fi {
- namePath := filepath.Join(rbdDevicePath, f.Name(), "name")
- content, err := ioutil.ReadFile(namePath)
- if err != nil {
- return "", err
- }
- if strings.TrimSpace(string(content)) != imageName {
- continue
- }
-
- poolPath := filepath.Join(rbdDevicePath, f.Name(), "pool")
- content, err = ioutil.ReadFile(poolPath)
- if err != nil {
- return "", err
- }
- if strings.TrimSpace(string(content)) != poolName {
- continue
- }
-
- snapPath := filepath.Join(rbdDevicePath, f.Name(), "current_snap")
- content, err = ioutil.ReadFile(snapPath)
- if err != nil {
- return "", err
- }
- if strings.TrimSpace(string(content)) == snapName {
- return f.Name(), nil
- }
- }
-
- return "", os.ErrNotExist
-}
diff --git a/contrib/drivers/ceph/ceph.go b/contrib/drivers/ceph/ceph.go
deleted file mode 100755
index cb892f620..000000000
--- a/contrib/drivers/ceph/ceph.go
+++ /dev/null
@@ -1,713 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-/*
-This module implements ceph driver for OpenSDS. Ceph driver will pass these
-operation requests about volume to go-ceph module.
-*/
-
-package ceph
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "os"
- "runtime"
-
- "github.com/ceph/go-ceph/rados"
- "github.com/ceph/go-ceph/rbd"
- log "github.com/golang/glog"
- "github.com/opensds/opensds/contrib/backup"
- "github.com/opensds/opensds/contrib/connector"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/opensds/opensds/pkg/utils/config"
- uuid "github.com/satori/go.uuid"
-)
-
-const (
- opensdsPrefix = "opensds-"
- sizeShiftBit = 30
- defaultConfPath = "/etc/opensds/driver/ceph.yaml"
- defaultAZ = "default"
-)
-
-const (
- KPoolName = "CephPoolName"
- KImageName = "CephImageName"
-)
-
-type CephConfig struct {
- ConfigFile string `yaml:"configFile,omitempty"`
- Pool map[string]PoolProperties `yaml:"pool,flow"`
-}
-
-func EncodeName(id string) string {
- return opensdsPrefix + id
-}
-
-func NewSrcMgr(conf *CephConfig) *SrcMgr {
- return &SrcMgr{conf: conf}
-}
-
-type SrcMgr struct {
- conn *rados.Conn
- ioctx *rados.IOContext
- img *rbd.Image
- conf *CephConfig
-}
-
-func (s *SrcMgr) GetConn() (*rados.Conn, error) {
- if s.conn != nil {
- return s.conn, nil
- }
- conn, err := rados.NewConn()
- if err != nil {
- log.Error("New connect failed:", err)
- return nil, err
- }
-
- if err = conn.ReadConfigFile(s.conf.ConfigFile); err != nil {
- log.Error("Read config file failed:", err)
- return nil, err
- }
- if err = conn.Connect(); err != nil {
- log.Error("Connect failed:", err)
- return nil, err
- }
- s.conn = conn
- return s.conn, nil
-}
-
-func (s *SrcMgr) GetIoctx(poolName string) (*rados.IOContext, error) {
- if s.ioctx != nil {
- return s.ioctx, nil
- }
-
- conn, err := s.GetConn()
- if err != nil {
- return nil, err
- }
- ioctx, err := conn.OpenIOContext(poolName)
- if err != nil {
- log.Error("Open IO context failed, poolName:", poolName, err)
- return nil, err
- }
- s.ioctx = ioctx
- return s.ioctx, err
-}
-
-// this function only used for open origin image rather than clone image or copy image
-func (s *SrcMgr) GetOriginImage(poolName string, imgName string, args ...interface{}) (*rbd.Image, error) {
- if s.img != nil {
- return s.img, nil
- }
- ioctx, err := s.GetIoctx(poolName)
- if err != nil {
- return nil, err
- }
- img := rbd.GetImage(ioctx, imgName)
- if err := img.Open(args...); err != nil {
- log.Error("When open image:", err)
- return nil, err
- }
- s.img = img
- return s.img, nil
-}
-
-func (s *SrcMgr) destroy() {
- if s.img != nil {
- s.img.Close()
- s.img = nil
- }
- if s.ioctx != nil {
- s.ioctx.Destroy()
- s.ioctx = nil
- }
- if s.conn != nil {
- s.conn.Shutdown()
- s.conn = nil
- }
-}
-
-type Driver struct {
- conf *CephConfig
-}
-
-func (d *Driver) Setup() error {
- d.conf = &CephConfig{ConfigFile: "/etc/ceph/ceph.conf"}
- p := config.CONF.OsdsDock.Backends.Ceph.ConfigPath
- if "" == p {
- p = defaultConfPath
- }
- _, err := Parse(d.conf, p)
- return err
-}
-
-func (d *Driver) Unset() error { return nil }
-
-func (d *Driver) createVolumeFromSnapshot(opt *pb.CreateVolumeOpts) error {
- mgr := NewSrcMgr(d.conf)
- defer mgr.destroy()
-
- poolName := opt.GetPoolName()
- srcSnapName := EncodeName(opt.GetSnapshotId())
- srcImgName := opt.GetMetadata()[KImageName]
- destImgName := EncodeName(opt.GetId())
-
- img, err := mgr.GetOriginImage(poolName, srcImgName, srcSnapName)
- if err != nil {
- return err
- }
- snap := img.GetSnapshot(srcSnapName)
- if ok, _ := snap.IsProtected(); !ok {
- if err := snap.Protect(); err != nil {
- log.Errorf("protect snapshot failed, %v", err)
- return err
- }
- defer snap.Unprotect()
- }
-
- ioctx, err := mgr.GetIoctx(poolName)
- if err != nil {
- return err
- }
-
- destImg, err := img.Clone(srcSnapName, ioctx, destImgName, rbd.RbdFeatureLayering, 20)
- if err != nil {
- log.Errorf("snapshot clone failed:%v", err)
- return err
- }
-
- // flatten dest image
- if err := destImg.Open(); err != nil {
- log.Error("new image open failed:", err)
- return err
- }
- defer destImg.Close()
- if err := destImg.Flatten(); err != nil {
- log.Errorf("new image flatten failed, %v", err)
- return err
- }
-
- log.Infof("create volume (%s) from snapshot (%s) success", srcImgName, srcSnapName)
- return nil
-}
-
-func (d *Driver) createVolume(opt *pb.CreateVolumeOpts) error {
- mgr := NewSrcMgr(d.conf)
- defer mgr.destroy()
-
- ioctx, err := mgr.GetIoctx(opt.GetPoolName())
- if err != nil {
- return err
- }
-
- name := EncodeName(opt.GetId())
- _, err = rbd.Create(ioctx, name, uint64(opt.GetSize())<> sizeShiftBit,
- FreeCapacity: p.Stats.MaxAvail >> sizeShiftBit,
- StorageType: d.conf.Pool[p.Name].StorageType,
- Extras: d.conf.Pool[p.Name].Extras,
- AvailabilityZone: d.conf.Pool[p.Name].AvailabilityZone,
- MultiAttach: d.conf.Pool[p.Name].MultiAttach,
- }
- if pol.AvailabilityZone == "" {
- pol.AvailabilityZone = defaultAZ
- }
- pols = append(pols, pol)
- }
- return pols, nil
-}
-
-func (d *Driver) InitializeSnapshotConnection(opt *pb.CreateSnapshotAttachmentOpts) (*model.ConnectionInfo, error) {
- poolName, ok := opt.GetMetadata()[KPoolName]
- if !ok {
- err := errors.New("Failed to find poolName in snapshot attachment metadata!")
- log.Error(err)
- return nil, err
- }
-
- imgName, ok := opt.GetMetadata()[KImageName]
- if !ok {
- err := errors.New("Failed to find imageName in snapshot attachment metadata!")
- log.Error(err)
- return nil, err
- }
-
- return &model.ConnectionInfo{
- DriverVolumeType: RBDProtocol,
- ConnectionData: map[string]interface{}{
- "secret_type": "ceph",
- "name": fmt.Sprintf("%s/%s@%s", poolName, imgName, EncodeName(opt.GetSnapshotId())),
- "cluster_name": "ceph",
- "hosts": []string{opt.GetHostInfo().Host},
- "volume_id": opt.GetSnapshotId(),
- "access_mode": "rw",
- "ports": []string{"6789"},
- },
- }, nil
-}
-
-func (d *Driver) TerminateSnapshotConnection(opt *pb.DeleteSnapshotAttachmentOpts) error {
- return nil
-}
-
-func (d *Driver) CreateVolumeGroup(opt *pb.CreateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method CreateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) UpdateVolumeGroup(opt *pb.UpdateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method UpdateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) DeleteVolumeGroup(opt *pb.DeleteVolumeGroupOpts) error {
- return &model.NotImplementError{"method DeleteVolumeGroup has not been implemented yet"}
-}
diff --git a/contrib/drivers/ceph/ceph_metrics.go b/contrib/drivers/ceph/ceph_metrics.go
deleted file mode 100644
index 7394c83a5..000000000
--- a/contrib/drivers/ceph/ceph_metrics.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-package ceph
-
-import (
- "strconv"
- "time"
-
- "github.com/opensds/opensds/pkg/model"
-)
-
-// Supported metrics
-var data = `
-resources:
- - resource: pool
- metrics:
- - pool_used_bytes
- - pool_raw_used_bytes
- - pool_available_bytes
- - pool_objects_total
- - pool_dirty_objects_total
- - pool_read_total
- - pool_read_bytes_total
- - pool_write_total
- - pool_write_bytes_total
- - resource: cluster
- metrics:
- - cluster_capacity_bytes
- - cluster_used_bytes
- - cluster_available_bytes
- - cluster_objects
- - resource: osd
- metrics:
- - osd_perf_commit_latency
- - osd_perf_apply_latency
- - osd_crush_weight
- - osd_depth
- - osd_reweight
- - osd_bytes
- - osd_used_bytes
- - osd_avail_bytes
- - osd_utilization
- - osd_variance
- - osd_pgs
- - osd_total_bytes
- - osd_total_used_bytes
- - osd_total_avail_bytes
- - osd_average_utilization
- - resource: health
- metrics:
- - health_status
- - total_pgs
- - active_pgs
- - scrubbing_pgs
- - deep_scrubbing_pgs
- - recovering_pgs
- - recovery_wait_pgs
- - backfilling_pgs
- - forced_recovery_pgs
- - forced_backfill_pgs
- - down_pgs
- - slow_requests
- - degraded_pgs
- - stuck_degraded_pgs
- - unclean_pgs
- - stuck_unclean_pgs
- - undersized_pgs
- - stuck_undersized_pgs
- - stale_pgs
- - stuck_stale_pgs
- - peering_pgs
- - degraded_objects
- - misplaced_objects
- - osdmap_flag_full
- - osdmap_flag_pauserd
- - osdmap_flag_pausewr
- - osdmap_flag_noup
- - osdmap_flag_nodown
- - osdmap_flag_noin
- - osdmap_flag_noout
- - osdmap_flag_nobackfill
- - osdmap_flag_norecover
- - osdmap_flag_norebalance
- - osdmap_flag_noscrub
- - osdmap_flag_nodeep_scrub
- - osdmap_flag_notieragent
- - osds_down
- - osds_up
- - osds_in
- - osds
- - pgs_remapped
- - recovery_io_bytes
- - recovery_io_keys
- - recovery_io_objects
- - client_io_read_bytes
- - client_io_write_bytes
- - client_io_ops
- - client_io_read_ops
- - client_io_write_ops
- - cache_flush_io_bytes
- - cache_evict_io_bytes
- - cache_promote_io_ops
- - resource: monitor
- metrics:
- - name
- - kb_total
- - kb_used
- - kb_avail
- - avail_percent
- - bytes_total
- - bytes_sst
- - bytes_log
- - bytes_misc
- - skew
- - latency
- - quorum
- - resource: volume
- metrics:
- - volume_name
- - volume_size_bytes
- - volume_objects
- - volume_objects_size_bytes
-`
-
-type MetricDriver struct {
- cli *MetricCli
-}
-
-func getCurrentUnixTimestamp() int64 {
- now := time.Now()
- secs := now.Unix()
- return secs
-}
-
-func (d *MetricDriver) CollectMetrics() ([]*model.MetricSpec, error) {
-
- metricMap, instance, err := d.cli.CollectMetrics()
- var tempMetricArray []*model.MetricSpec
- for i := 0; i < len(metricMap); i++ {
- val, _ := strconv.ParseFloat(metricMap[i].Value, 64)
- associatorMap := make(map[string]string)
- for k := range metricMap[i].Const_Label {
- associatorMap[k] = metricMap[i].Const_Label[k]
- }
- if metricMap[i].Var_Label != nil {
- for k := range metricMap[i].Var_Label {
- associatorMap[k] = metricMap[i].Var_Label[k]
- }
- }
- metricValue := &model.Metric{
- Value: val,
- Timestamp: getCurrentUnixTimestamp(),
- }
- metricValues := make([]*model.Metric, 0)
- metricValues = append(metricValues, metricValue)
- metric := &model.MetricSpec{
- InstanceID: instance[0],
- InstanceName: instance[1],
- Job: "ceph",
- Labels: associatorMap,
- Component: metricMap[i].Component,
- Name: metricMap[i].Name,
- Unit: metricMap[i].Unit,
- AggrType: metricMap[i].AggrType,
- MetricValues: metricValues,
- }
- tempMetricArray = append(tempMetricArray, metric)
- }
- metricArray := tempMetricArray
- return metricArray, err
-}
-
-func (d *MetricDriver) Setup() error {
- cli, err := NewMetricCli()
- if err != nil {
- return err
- }
- d.cli = cli
- return nil
-}
-
-func (d *MetricDriver) Teardown() error {
- d.cli.conn.Shutdown()
- return nil
-}
diff --git a/contrib/drivers/ceph/ceph_metrics_test.go b/contrib/drivers/ceph/ceph_metrics_test.go
deleted file mode 100644
index d0470213f..000000000
--- a/contrib/drivers/ceph/ceph_metrics_test.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ceph
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "testing"
-
- "github.com/opensds/opensds/pkg/utils/exec"
-
- "github.com/opensds/opensds/pkg/model"
-)
-
-var pool_Label map[string]string = map[string]string{"cluster": "ceph", "pool": "rbd"}
-var osd_label map[string]string = map[string]string{"cluster": "ceph", "osd": "osd.0"}
-var cluster_label map[string]string = map[string]string{"cluster": "ceph"}
-var health_label map[string]string = map[string]string{"cluster": "ceph"}
-var volume_label map[string]string = map[string]string{"cluster": "ceph"}
-
-var expected_data map[string]CephMetricStats = map[string]CephMetricStats{
- "pool_used_bytes": {"used", "859", "bytes", nil, "", pool_Label, "pool"},
- "pool_raw_used_bytes": {"raw_used", "859", "bytes", nil, "", pool_Label, "pool"},
- "pool_available_bytes": {"available", "469501706240", "bytes", nil, "", pool_Label, "pool"},
- "pool_objects_total": {"objects", "14", "", nil, "total", pool_Label, "pool"},
- "pool_dirty_objects_total": {"dirty_objects", "14", "", nil, "total", pool_Label, "pool"},
- "pool_read_total": {"read", "145", "", nil, "total", pool_Label, "pool"},
- "pool_read_bytes_total": {"read", "918304", "bytes", nil, "total", pool_Label, "pool"},
- "pool_write_total": {"write", "1057", "", nil, "total", pool_Label, "pool"},
- "pool_write_bytes_total": {"write", "16384", "bytes", nil, "total", pool_Label, "pool"},
- "cluster_capacity_bytes": {"capacity", "494462976000", "bytes", nil, "", cluster_label, "cluster"},
- "cluster_used_bytes": {"used", "238116864", "bytes", nil, "", cluster_label, "cluster"},
- "cluster_available_bytes": {"available", "494224859136", "bytes", nil, "", cluster_label, "cluster"},
- "cluster_objects": {"objects", "14", "", nil, "", cluster_label, "cluster"},
- "perf_commit_latency_ms": {"perf_commit_latency", "0", "ms", nil, "", osd_label, ""},
- "perf_apply_latency_ms": {"perf_apply_latency", "0", "ms", nil, "", osd_label, ""},
- "osd_crush_weight": {"crush_weight", ":0", "", nil, "", osd_label, "osd"},
- "osd_depth": {"depth", "2", "", nil, "", osd_label, "osd"},
- "osd_reweight": {"reweight", "1.000000", "", nil, "", osd_label, "osd"},
- "osd_bytes": {"bytes", "15717356000", "", nil, "", osd_label, "osd"},
- "osd_bytes_used": {"bytes_used", "114624000", "", nil, "", osd_label, "osd"},
- "osd_bytes_avail": {"bytes_avail", "15602732000", "", nil, "", osd_label, "osd"},
- "osd_utilization": {"utilization", "0.729283", "", nil, "", osd_label, "osd"},
- "osd_var": {"var", "1.000000", "", nil, "", osd_label, "osd"},
- "osd_pgs": {"pgs", "102", "", nil, "", osd_label, "osd"},
- "osd_total_bytes": {"total", "15717356000", "bytes", nil, "", health_label, "osd"},
- "osd_total_used_bytes": {"total_used", "114624000", "bytes", nil, "", health_label, "osd"},
- "osd_total_avail_bytes": {"total_avail", "15602732000", "bytes", nil, "", health_label, "osd"},
- "osd_average_utilization": {"average_utilization", "0.729283", "", nil, "", health_label, "osd"},
- "osd": {"osd", "0", "", nil, "", osd_label, ""},
- "osd_up": {"osd_up", "1", "", nil, "", osd_label, ""},
- "osd_in": {"osd_in", "1", "", nil, "", osd_label, ""},
- "client_io_write_ops": {"io_write", "0", "ops", nil, "", health_label, "client"},
- "client_io_read_bytes": {"io_read", "0", "bytes", nil, "", health_label, "client"},
- "client_io_read_ops": {"io_read", "0", "ops", nil, "", health_label, "client"},
- "client_io_write_bytes": {"io_write", "0", "bytes", nil, "", health_label, "client"},
- "cache_flush_io_bytes": {"cache_flush_io", "0", "bytes", nil, "", health_label, ""},
- "cache_evict_io_bytes": {"cache_evict_io", "0", "bytes", nil, "", health_label, ""},
- "cache_promote_io_ops": {"cache_promote_io", "0", "ops", nil, "", health_label, ""},
- "degraded_objects": {"degraded_objects", "0", "", nil, "", health_label, ""},
- "misplaced_objects": {"misplaced_objects", "0", "", nil, "", health_label, ""},
- "osds": {"osds", "1", "", nil, "", health_label, ""}, "osds_up": {"osds_up", "1", "", nil, "", health_label, ""}, "osds_in": {"osds_in", "1", "", nil, "", health_label, ""},
- "pgs_remapped": {"pgs_remapped", "0", "", nil, "", health_label, ""}, "total_pgs": {"total_pgs", "102", "", nil, "", health_label, ""}, "volume_name": {"name", "opensds-4c5cb264-50d1-4bfd-a663-dface9b669c9", "", nil, "", volume_label, "volume"},
- "volume_size_bytes": {"size", "1073741824", "bytes", nil, "", volume_label, "volume"},
- "volume_objects": {"objects", "1024", "", nil, "", volume_label, "volume"},
- "volume_object_size_bytes": {"object_size", "1048576", "bytes", nil, "", volume_label, "volume"}}
-
-var expctdMetricList []string = []string{"pool_used_bytes", "pool_raw_used_bytes", "pool_available_bytes", "pool_objects_total", "pool_dirty_objects_total", "pool_read_total", "pool_read_bytes_total", "pool_write_total", "pool_write_bytes_total",
- "cluster_capacity_bytes", "cluster_available_bytes", "cluster_used_bytes", "cluster_objects", "perf_commit_latency_ms", "perf_apply_latency_ms", "osd_crush_weight", "osd_depth", "osd_reweight", "osd_bytes", "osd_bytes_used", "osd_bytes_avail", "osd_utilization",
- "osd_var", "osd_pgs", "osd_total_bytes", "osd_total_used_bytes", "osd_total_avail_bytes", "osd_average_utilization", "osd", "osd_up", "osd_in", "client_io_write_ops", "client_io_read_bytes", "client_io_read_ops", "client_io_write_bytes", "cache_flush_io_bytes", "cache_evict_io_bytes", "cache_promote_io_ops", "degraded_objects", "misplaced_objects", "osds", "osds_up", "osds_in", "pgs_remapped", "total_pgs", "volume_name", "volume_size_bytes", "volume_objects", "volume_object_size_bytes"}
-
-var fakeResp map[string]*MetricFakeResp = map[string]*MetricFakeResp{`{"detail":"detail","format":"json","prefix":"df"}`: {[]byte(`{"stats":{"total_bytes":494462976000,"total_used_bytes":238116864,"total_avail_bytes":494224859136,"total_objects":14},"pools":[{"name":"rbd","id":1,"stats":{"kb_used":1,"bytes_used":859,"percent_used":0.00,"max_avail":469501706240,"objects":14,"quota_objects":0,"quota_bytes":0,"dirty":14,"rd":145,"rd_bytes":918304,"wr":1057,"wr_bytes":16384,"raw_bytes_used":859}}]}`), "", nil},
- `{"format":"json","prefix":"osd df"}`: {[]byte(`{"nodes":[{"id":0,"device_class":"hdd","name":"osd.0","type":"osd","type_id":0,"crush_weight":0,"depth":2,"pool_weights":{},"reweight":1.000000,"kb":15717356,"kb_used":114624,"kb_avail":15602732,"utilization":0.729283,"var":1.000000,"pgs":102}],"stray":[],"summary":{"total_kb":15717356,"total_kb_used":114624,"total_kb_avail":15602732,"average_utilization":0.729283,"min_var":1.000000,"max_var":1.000000,"dev":0.000000}}`), "", nil},
- `{"format":"json","prefix":"osd dump"}`: {[]byte(`{"epoch":19,"fsid":"282d4751-4f33-4186-b983-b51cc21a5a8e","created":"2019-05-07 11:49:02.459507","modified":"2019-05-14 16:25:56.992964","flags":"sortbitwise,recovery_deletes,purged_snapdirs","crush_version":3,"full_ratio":0.950000,"backfillfull_ratio":0.900000,"nearfull_ratio":0.850000,"cluster_snapshot":"","pool_max":3,"max_osd":1,"require_min_compat_client":"jewel","min_compat_client":"jewel","require_osd_release":"luminous","pools":[{"pool":1,"pool_name":"sample_pool","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"object_hash":2,"pg_num":1,"pg_placement_num":1,"crash_replay_interval":0,"last_change":"8","last_force_op_resend":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{},"application_metadata":{}},{"pool":2,"pool_name":"rbd","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"object_hash":2,"pg_num":1,"pg_placement_num":1,"crash_replay_interval":0,"last_change":"11","last_force_op_resend":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"rea
d_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{},"application_metadata":{}},{"pool":3,"pool_name":"sapm_pools","flags":1,"flags_names":"hashpspool","type":1,"size":1,"min_size":1,"crush_rule":0,"object_hash":2,"pg_num":100,"pg_placement_num":100,"crash_replay_interval":0,"last_change":"19","last_force_op_resend":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":3,"snap_epoch":19,"pool_snaps":[],"removed_snaps":"[1~3]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{},"application_metadata":{}}],"osds":[{"osd":0,"uuid":"44559c14-fb71-4183-9e44-167e0e9c057a","up":1,"in":1,"weight":1.000000,"primary_affinity":1.000000,"last_clean_begin":0,"last_clean_end":0,"up_from":7,"up_thru":15,"down_at":6,"lost_at":0,"public_addr":"192.168.1.47:6800/1393","cluster_addr":"192.168.1.47:6801/1393","heartbeat_ba
ck_addr":"192.168.1.47:6802/1393","heartbeat_front_addr":"192.168.1.47:6803/1393","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"2019-05-10 18:23:18.070300","laggy_probability":0.000000,"laggy_interval":0,"features":4611087853746454523,"old_weight":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blacklist":{},"erasure_code_profiles":{"default":{"k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}}}`), "", nil},
- `{"format":"json","prefix":"osd perf"}`: {[]byte(`{"osd_perf_infos":[{"id":0,"perf_stats":{"commit_latency_ms":0,"apply_latency_ms":0}}]}`), "", nil},
- `{"format":"json","prefix":"status"}`: {[]byte(`{"fsid":"282d4751-4f33-4186-b983-b51cc21a5a8e","health":{"checks":{"PG_AVAILABILITY":{"severity":"HEALTH_WARN","summary":{"message":"Reduced data availability: 2 pgs inactive"}},"PG_DEGRADED":{"severity":"HEALTH_WARN","summary":{"message":"Degraded data redundancy: 2 pgs undersized"}},"POOL_APP_NOT_ENABLED":{"severity":"HEALTH_WARN","summary":{"message":"application not enabled on 1 pool(s)"}}},"status":"HEALTH_WARN","summary":[{"severity":"HEALTH_WARN","summary":"'ceph health' JSON format has changed in luminous. If you see this your monitoring system is scraping the wrong fields. Disable this with 'mon health preluminous compat warning = false'"}],"overall_status":"HEALTH_WARN"},"election_epoch":5,"quorum":[0],"quorum_names":["openSDS-arpita"],"monmap":{"epoch":1,"fsid":"282d4751-4f33-4186-b983-b51cc21a5a8e","modified":"2019-05-07 11:49:01.502074","created":"2019-05-07 11:49:01.502074","features":{"persistent":["kraken","luminous"],"optional":[]},"mons":[{"rank":0,"name":"openSDS-arpita","addr":"192.168.1.47:6789/0","public_addr":"192.168.1.47:6789/0"}]},"osdmap":{"osdmap":{"epoch":19,"num_osds":1,"num_up_osds":1,"num_in_osds":1,"full":false,"nearfull":false,"num_remapped_pgs":0}},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":100},{"state_name":"undersized+peered","count":2}],"num_pgs":102,"num_pools":3,"num_objects":8,"data_bytes":247,"bytes_used":117374976,"bytes_avail":15977197568,"bytes_total":16094572544,"inactive_pgs_ratio":0.019608},"fsmap":{"epoch":1,"by_rank":[]},"mgrmap":{"epoch":9,"active_gid":14097,"active_name":"openSDS-arpita","active_addr":"192.168.1.47:6804/1294","available":true,"standbys":[],"modules":["status"],"available_modules":["balancer","dashboard","influx","localpool","prometheus","restful","selftest","status","zabbix"],"services":{}},"servicemap":{"epoch":1,"modified":"0.000000","services":{}}}`), "", nil},
- `{"format":"json","prefix":"time-sync-status"}`: {[]byte(`{"timechecks":{"epoch":5,"round":0,"round_status":"finished"}}`), "", nil}}
-
-var respMap map[string]*MetricFakeRep = map[string]*MetricFakeRep{"ls": {"opensds-4c5cb264-50d1-4bfd-a663-dface9b669c9", nil}, "info": {`{"name":"opensds-4c5cb264-50d1-4bfd-a663-dface9b669c9","size":1073741824,"objects":1024,"order":20,"object_size":1048576,"block_name_prefix":"rbd_data.1e5246b8b4567","format":2,"features":["layering"],"flags":[],"create_timestamp":"Wed Jun 5 12:45:23 2019"}`, nil}}
-
-type MetricFakeconn struct {
- RespMap map[string]*MetricFakeResp
-}
-
-func NewMetricFakeconn(respMap map[string]*MetricFakeResp) Conn {
- return &MetricFakeconn{RespMap: fakeResp}
-}
-
-type MetricFakeResp struct {
- buf []byte
- info string
- err error
-}
-
-func (n *MetricFakeconn) ReadDefaultConfigFile() error {
- return nil
-}
-
-func (n *MetricFakeconn) Connect() error {
- return nil
-}
-
-func (n *MetricFakeconn) GetFSID() (fsid string, err error) {
- fake_fsid := "b987-654-321"
- return fake_fsid, nil
-}
-func (n *MetricFakeconn) MonCommand(arg []byte) ([]byte, string, error) {
- temp := string(arg)
- var buffer []byte
- if temp != "" {
- buffer = fakeResp[temp].buf
- }
- return buffer, "", nil
-}
-
-func (n *MetricFakeconn) Shutdown() {}
-
-type MetricFakeExecuter struct {
- RespMap map[string]*MetricFakeRep
-}
-
-type MetricFakeRep struct {
- out string
- err error
-}
-
-func (f *MetricFakeExecuter) Run(name string, args ...string) (string, error) {
- var cmd = name
- if name == "env" {
- cmd = args[1]
- }
- v, ok := f.RespMap[cmd]
- if !ok {
- return "", fmt.Errorf("can't find specified op: %s", args[1])
- }
- return v.out, v.err
-}
-func NewMetricFakeExecuter(respMap map[string]*MetricFakeRep) exec.Executer {
- return &MetricFakeExecuter{RespMap: respMap}
-}
-
-func TestCollectMetrics(t *testing.T) {
- var md = &MetricDriver{}
- md.Setup()
- md.cli = &MetricCli{nil, nil}
- md.cli.conn = NewMetricFakeconn(fakeResp)
- md.cli.RootExecuter = NewMetricFakeExecuter(respMap)
- var tempMetricArray []*model.MetricSpec
- for _, element := range expctdMetricList {
- val, _ := strconv.ParseFloat(expected_data[element].Value, 64)
- expctdmetricValue := &model.Metric{
- Timestamp: 123456,
- Value: val,
- }
- expctdMetricValues := make([]*model.Metric, 0)
- expctdMetricValues = append(expctdMetricValues, expctdmetricValue)
- metric := &model.MetricSpec{
- InstanceID: "b987-654-321",
- InstanceName: "opensds-ceph-b987-654-321",
- Job: "ceph",
- Labels: expected_data[element].Var_Label,
- Component: expected_data[element].Component,
- Name: expected_data[element].Name,
- Unit: expected_data[element].Unit,
- AggrType: expected_data[element].AggrType,
- MetricValues: expctdMetricValues,
- }
- tempMetricArray = append(tempMetricArray, metric)
- }
-
- expectedMetrics := tempMetricArray
- retunMetrics, err := md.CollectMetrics()
-
- if err != nil {
- t.Error("failed to collect stats:", err)
- }
- // we can't use deep equal on metric spec objects as the timesatmp calulation is time.Now() in driver
- // validate equivalence of go weteach metricspec fields against expected except timestamp
- var b bool = true
- for i, m := range expectedMetrics {
- b = b && reflect.DeepEqual(m.InstanceName, retunMetrics[i].InstanceName)
- b = b && reflect.DeepEqual(m.InstanceID, retunMetrics[i].InstanceID)
- b = b && reflect.DeepEqual(m.Job, retunMetrics[i].Job)
- for k, _ := range m.Labels {
- b = b && reflect.DeepEqual(m.Labels[k], retunMetrics[i].Labels[k])
- }
- b = b && reflect.DeepEqual(m.Component, retunMetrics[i].Component)
- b = b && reflect.DeepEqual(m.Unit, retunMetrics[i].Unit)
- b = b && reflect.DeepEqual(m.AggrType, retunMetrics[i].AggrType)
- for j, v := range m.MetricValues {
- b = b && reflect.DeepEqual(v.Value, retunMetrics[i].MetricValues[j].Value)
- }
- }
- if !b {
- t.Errorf("expected metric spec")
- for _, p := range expectedMetrics {
- t.Logf("%+v\n", p)
- for _, v := range p.MetricValues {
- t.Logf("%+v\n", v)
- }
- }
- t.Errorf("returned metric spec")
- for _, p := range retunMetrics {
- t.Logf("%+v\n", p)
- for _, v := range p.MetricValues {
- t.Logf("%+v\n", v)
- }
- }
- }
-}
diff --git a/contrib/drivers/ceph/metrics_cli.go b/contrib/drivers/ceph/metrics_cli.go
deleted file mode 100644
index 66366b3aa..000000000
--- a/contrib/drivers/ceph/metrics_cli.go
+++ /dev/null
@@ -1,1014 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-package ceph
-
-import (
- "encoding/json"
- "fmt"
- "strings"
-
- "github.com/ceph/go-ceph/rados"
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/utils/exec"
-)
-
-type MetricCli struct {
- conn Conn
- RootExecuter exec.Executer
-}
-
-type Conn interface {
- ReadDefaultConfigFile() error
- Connect() error
- GetFSID() (fsid string, err error)
- Shutdown()
- MonCommand([]byte) ([]byte, string, error)
-}
-
-// Verify that *rados.Conn implements Conn correctly.
-var _ Conn = &rados.Conn{}
-
-func NewMetricCli() (*MetricCli, error) {
-
- conn, err := rados.NewConn()
- if err != nil {
- log.Error("when connecting to rados:", err)
- return nil, err
- }
-
- err = conn.ReadDefaultConfigFile()
- if err != nil {
- log.Error("file ReadDefaultConfigFile can't read", err)
- return nil, err
- }
-
- err = conn.Connect()
- if err != nil {
- log.Error("when connecting to ceph cluster:", err)
- return nil, err
- }
-
- return &MetricCli{
- conn,
- exec.NewRootExecuter(),
- }, nil
-}
-
-type CephMetricStats struct {
- Name string
- Value string
- Unit string
- Const_Label map[string]string
- AggrType string
- Var_Label map[string]string
- Component string
-}
-
-type cephPoolStats struct {
- Pools []struct {
- Name string `json:"name"`
- ID int `json:"id"`
- Stats struct {
- BytesUsed json.Number `json:"bytes_used"`
- RawBytesUsed json.Number `json:"raw_bytes_used"`
- MaxAvail json.Number `json:"max_avail"`
- Objects json.Number `json:"objects"`
- DirtyObjects json.Number `json:"dirty"`
- ReadIO json.Number `json:"rd"`
- ReadBytes json.Number `json:"rd_bytes"`
- WriteIO json.Number `json:"wr"`
- WriteBytes json.Number `json:"wr_bytes"`
- } `json:"stats"`
- } `json:"pools"`
-}
-
-type cephClusterStats struct {
- Stats struct {
- TotalBytes json.Number `json:"total_bytes"`
- TotalUsedBytes json.Number `json:"total_used_bytes"`
- TotalAvailBytes json.Number `json:"total_avail_bytes"`
- TotalObjects json.Number `json:"total_objects"`
- } `json:"stats"`
-}
-
-type cephPerfStat struct {
- PerfInfo []struct {
- ID json.Number `json:"id"`
- Stats struct {
- CommitLatency json.Number `json:"commit_latency_ms"`
- ApplyLatency json.Number `json:"apply_latency_ms"`
- } `json:"perf_stats"`
- } `json:"osd_perf_infos"`
-}
-
-type cephOSDDF struct {
- OSDNodes []struct {
- Name json.Number `json:"name"`
- CrushWeight json.Number `json:"crush_weight"`
- Depth json.Number `json:"depth"`
- Reweight json.Number `json:"reweight"`
- KB json.Number `json:"kb"`
- UsedKB json.Number `json:"kb_used"`
- AvailKB json.Number `json:"kb_avail"`
- Utilization json.Number `json:"utilization"`
- Variance json.Number `json:"var"`
- Pgs json.Number `json:"pgs"`
- } `json:"nodes"`
-
- Summary struct {
- TotalKB json.Number `json:"total_kb"`
- TotalUsedKB json.Number `json:"total_kb_used"`
- TotalAvailKB json.Number `json:"total_kb_avail"`
- AverageUtil json.Number `json:"average_utilization"`
- } `json:"summary"`
-}
-
-type cephOSDDump struct {
- OSDs []struct {
- OSD json.Number `json:"osd"`
- Up json.Number `json:"up"`
- In json.Number `json:"in"`
- } `json:"osds"`
-}
-
-type cephHealthStats struct {
- Health struct {
- Summary []struct {
- Severity string `json:"severity"`
- Summary string `json:"summary"`
- } `json:"summary"`
- OverallStatus string `json:"overall_status"`
- Status string `json:"status"`
- Checks map[string]struct {
- Severity string `json:"severity"`
- Summary struct {
- Message string `json:"message"`
- } `json:"summary"`
- } `json:"checks"`
- } `json:"health"`
- OSDMap struct {
- OSDMap struct {
- NumOSDs json.Number `json:"num_osds"`
- NumUpOSDs json.Number `json:"num_up_osds"`
- NumInOSDs json.Number `json:"num_in_osds"`
- NumRemappedPGs json.Number `json:"num_remapped_pgs"`
- } `json:"osdmap"`
- } `json:"osdmap"`
- PGMap struct {
- NumPGs json.Number `json:"num_pgs"`
- WriteOpPerSec json.Number `json:"write_op_per_sec"`
- ReadOpPerSec json.Number `json:"read_op_per_sec"`
- WriteBytePerSec json.Number `json:"write_bytes_sec"`
- ReadBytePerSec json.Number `json:"read_bytes_sec"`
- RecoveringObjectsPerSec json.Number `json:"recovering_objects_per_sec"`
- RecoveringBytePerSec json.Number `json:"recovering_bytes_per_sec"`
- RecoveringKeysPerSec json.Number `json:"recovering_keys_per_sec"`
- CacheFlushBytePerSec json.Number `json:"flush_bytes_sec"`
- CacheEvictBytePerSec json.Number `json:"evict_bytes_sec"`
- CachePromoteOpPerSec json.Number `json:"promote_op_per_sec"`
- DegradedObjects json.Number `json:"degraded_objects"`
- MisplacedObjects json.Number `json:"misplaced_objects"`
- PGsByState []struct {
- Count float64 `json:"count"`
- States string `json:"state_name"`
- } `json:"pgs_by_state"`
- } `json:"pgmap"`
-}
-
-type cephMonitorStats struct {
- Health struct {
- Health struct {
- HealthServices []struct {
- Mons []struct {
- Name string `json:"name"`
- KBTotal json.Number `json:"kb_total"`
- KBUsed json.Number `json:"kb_used"`
- KBAvail json.Number `json:"kb_avail"`
- AvailPercent json.Number `json:"avail_percent"`
- StoreStats struct {
- BytesTotal json.Number `json:"bytes_total"`
- BytesSST json.Number `json:"bytes_sst"`
- BytesLog json.Number `json:"bytes_log"`
- BytesMisc json.Number `json:"bytes_misc"`
- } `json:"store_stats"`
- } `json:"mons"`
- } `json:"health_services"`
- } `json:"health"`
- TimeChecks struct {
- Mons []struct {
- Name string `json:"name"`
- Skew json.Number `json:"skew"`
- Latency json.Number `json:"latency"`
- } `json:"mons"`
- } `json:"timechecks"`
- } `json:"health"`
- Quorum []int `json:"quorum"`
-}
-
-type cephTimeSyncStatus struct {
- TimeChecks map[string]struct {
- Health string `json:"health"`
- Latency json.Number `json:"latency"`
- Skew json.Number `json:"skew"`
- } `json:"time_skew_status"`
-}
-
-func (cli *MetricCli) CollectPoolMetrics() ([]CephMetricStats, error) {
- returnMap := []CephMetricStats{}
- const_label := make(map[string]string)
- const_label["cluster"] = "ceph"
- cmd, err := json.Marshal(map[string]interface{}{
- "prefix": "df",
- "detail": "detail",
- "format": "json",
- })
- if err != nil {
- log.Errorf("cmd failed with %s\n", err)
- return nil, err
- }
-
- buf, _, err := cli.conn.MonCommand(cmd)
- if err != nil {
- log.Errorf("unable to collect data from ceph df detail")
- return nil, err
- }
-
- pool_stats := &cephPoolStats{}
- if err := json.Unmarshal(buf, pool_stats); err != nil {
- log.Errorf("unmarshal error: %v", err)
- return nil, err
- }
-
- for _, pool := range pool_stats.Pools {
- var_label := make(map[string]string)
- var_label["pool"] = pool.Name
- returnMap = append(returnMap, CephMetricStats{
- "used",
- pool.Stats.BytesUsed.String(),
- "bytes", const_label,
- "",
- var_label,
- "pool"})
-
- returnMap = append(returnMap, CephMetricStats{
- "raw_used",
- pool.Stats.RawBytesUsed.String(),
- "bytes", const_label,
- "",
- var_label,
- "pool"})
-
- returnMap = append(returnMap, CephMetricStats{
- "available",
- pool.Stats.MaxAvail.String(),
- "bytes",
- const_label,
- "",
- var_label,
- "pool"})
-
- returnMap = append(returnMap, CephMetricStats{
- "objects",
- pool.Stats.Objects.String(),
- "",
- const_label,
- "total",
- var_label,
- "pool"})
-
- returnMap = append(returnMap, CephMetricStats{
- "dirty_objects",
- pool.Stats.DirtyObjects.String(),
- "",
- const_label,
- "total",
- var_label,
- "pool"})
-
- returnMap = append(returnMap, CephMetricStats{
- "read", pool.Stats.ReadIO.String(),
- "",
- const_label,
- "total",
- var_label, "pool"})
-
- returnMap = append(returnMap, CephMetricStats{
- "read",
- pool.Stats.ReadBytes.String(),
- "bytes",
- const_label,
- "total",
- var_label, "pool"})
-
- returnMap = append(returnMap, CephMetricStats{
- "write",
- pool.Stats.WriteIO.String(),
- "", const_label,
- "total",
- var_label, "pool"})
-
- returnMap = append(returnMap, CephMetricStats{
- "write",
- pool.Stats.WriteBytes.String(),
- "bytes",
- const_label,
- "total",
- var_label, "pool"})
- }
- return returnMap, nil
-}
-
-func (cli *MetricCli) CollectClusterMetrics() ([]CephMetricStats, error) {
- var returnMap []CephMetricStats
- returnMap = []CephMetricStats{}
- const_label := make(map[string]string)
- const_label["cluster"] = "ceph"
- cmd, err := json.Marshal(map[string]interface{}{
- "prefix": "df",
- "detail": "detail",
- "format": "json",
- })
- if err != nil {
- log.Errorf("cmd failed with %s\n", err)
- return nil, err
- }
- buf, _, err := cli.conn.MonCommand(cmd)
- if err != nil {
- log.Errorf("unable to collect data from ceph df detail")
- return nil, err
- }
- cluster_stats := &cephClusterStats{}
- if err := json.Unmarshal(buf, cluster_stats); err != nil {
- log.Fatalf("unmarshal error: %v", err)
- return nil, err
- }
- returnMap = append(returnMap,
- CephMetricStats{
- "capacity",
- cluster_stats.Stats.TotalBytes.String(),
- "bytes",
- const_label,
- "",
- nil,
- "cluster"},
- CephMetricStats{
- "available",
- cluster_stats.Stats.TotalAvailBytes.String(),
- "bytes",
- const_label,
- "",
- nil,
- "cluster"},
- CephMetricStats{
- "used",
- cluster_stats.Stats.TotalUsedBytes.String(),
- "bytes",
- const_label,
- "",
- nil,
- "cluster"},
- CephMetricStats{
- "objects",
- cluster_stats.Stats.TotalObjects.String(),
- "",
- const_label,
- "", nil, "cluster"},
- )
- return returnMap, nil
-}
-
-func (cli *MetricCli) CollectPerfMetrics() ([]CephMetricStats, error) {
- var returnMap []CephMetricStats
- returnMap = []CephMetricStats{}
- const_label := make(map[string]string)
- const_label["cluster"] = "ceph"
- cmd, err := json.Marshal(map[string]interface{}{
- "prefix": "osd perf",
- "format": "json",
- })
- if err != nil {
- log.Errorf("cmd failed with %s\n", err)
- return nil, err
- }
- buf, _, err := cli.conn.MonCommand(cmd)
- if err != nil {
- log.Errorf("unable to collect data from ceph osd perf")
- return nil, err
- }
- osdPerf := &cephPerfStat{}
- if err := json.Unmarshal(buf, osdPerf); err != nil {
- log.Errorf("unmarshal failed")
- return nil, err
- }
- for _, perfStat := range osdPerf.PerfInfo {
- var_label := make(map[string]string)
- osdID, err := perfStat.ID.Int64()
- if err != nil {
- log.Errorf("when collecting ceph cluster metrics")
- return nil, err
- }
- var_label["osd"] = fmt.Sprintf("osd.%v", osdID)
- returnMap = append(returnMap,
- CephMetricStats{
- "perf_commit_latency",
- perfStat.Stats.CommitLatency.String(),
- "ms",
- const_label,
- "",
- var_label, ""},
- CephMetricStats{
- "perf_apply_latency",
- perfStat.Stats.ApplyLatency.String(),
- "ms",
- const_label,
- "",
- var_label, ""})
- }
- return returnMap, nil
-}
-
-func (cli *MetricCli) CollectOsddfMetrics() ([]CephMetricStats, error) {
- var returnMap []CephMetricStats
- returnMap = []CephMetricStats{}
- const_label := make(map[string]string)
- const_label["cluster"] = "ceph"
- cmd, err := json.Marshal(map[string]interface{}{
- "prefix": "osd df",
- "format": "json",
- })
- if err != nil {
- log.Errorf("cmd failed with %s\n", err)
- return nil, err
- }
- buf, _, err := cli.conn.MonCommand(cmd)
- if err != nil {
- log.Errorf("unable to collect data from ceph osd df")
- return nil, err
- }
- osddf := &cephOSDDF{}
- if err := json.Unmarshal(buf, osddf); err != nil {
- log.Errorf("unmarshal failed")
- return nil, err
- }
- for _, osd_df := range osddf.OSDNodes {
- var_label := make(map[string]string)
- var_label["osd"] = osd_df.Name.String()
- returnMap = append(returnMap,
- CephMetricStats{
- "crush_weight",
- osd_df.CrushWeight.String(),
- "", const_label,
- "",
- var_label, "osd"})
- returnMap = append(returnMap,
- CephMetricStats{
- "depth",
- osd_df.Depth.String(),
- "", const_label,
- "", var_label, "osd"})
-
- returnMap = append(returnMap,
- CephMetricStats{
- "reweight",
- osd_df.Reweight.String(),
- "", const_label,
- "", var_label, "osd"})
-
- osd_df_kb, _ := osd_df.KB.Float64()
- osd_df_bytes := fmt.Sprint(osd_df_kb * 1e3)
- returnMap = append(returnMap,
- CephMetricStats{
- "bytes",
- osd_df_bytes,
- "", const_label,
- "", var_label, "osd"})
- osd_df_kb_used, _ := osd_df.UsedKB.Float64()
- osd_df_bytes_used := fmt.Sprint(osd_df_kb_used * 1e3)
- returnMap = append(returnMap,
- CephMetricStats{
- "bytes_used",
- osd_df_bytes_used,
- "", const_label,
- "", var_label, "osd"})
- osd_df_kb_avail, _ := osd_df.AvailKB.Float64()
- osd_df_bytes_avail := fmt.Sprint(osd_df_kb_avail * 1e3)
- returnMap = append(returnMap,
- CephMetricStats{
- "bytes_avail",
- osd_df_bytes_avail,
- "", const_label,
- "", var_label, "osd"})
- returnMap = append(returnMap,
- CephMetricStats{
- "utilization",
- osd_df.Utilization.String(),
- "", const_label,
- "", var_label, "osd"})
- returnMap = append(returnMap,
- CephMetricStats{
- "var",
- osd_df.Variance.String(),
- "", const_label,
- "", var_label, "osd"})
- returnMap = append(returnMap,
- CephMetricStats{
- "pgs",
- osd_df.Pgs.String(),
- "", const_label,
- "", var_label, "osd"})
- }
- total_kb, _ := osddf.Summary.TotalKB.Float64()
- total_bytes := fmt.Sprint(total_kb * 1e3)
- returnMap = append(returnMap, CephMetricStats{
- "total",
- total_bytes,
- "bytes",
- const_label,
- "",
- nil, "osd"})
-
- total_used_kb, _ := osddf.Summary.TotalUsedKB.Float64()
- total_used_bytes := fmt.Sprint(total_used_kb * 1e3)
- returnMap = append(returnMap, CephMetricStats{
- "total_used",
- total_used_bytes,
- "bytes",
- const_label,
- "",
- nil, "osd"})
-
- total_avail_kb, _ := osddf.Summary.TotalAvailKB.Float64()
- total_avail_bytes := fmt.Sprint(total_avail_kb * 1e3)
- returnMap = append(returnMap, CephMetricStats{
- "total_avail",
- total_avail_bytes,
- "bytes",
- const_label,
- "",
- nil, "osd"})
- returnMap = append(returnMap, CephMetricStats{
- "average_utilization",
- osddf.Summary.AverageUtil.String(),
- "",
- const_label,
- "",
- nil, "osd"})
-
- return returnMap, nil
-}
-
-func (cli *MetricCli) CollectOsddumpMetrics() ([]CephMetricStats, error) {
- var returnMap []CephMetricStats
- returnMap = []CephMetricStats{}
- const_label := make(map[string]string)
- const_label["cluster"] = "ceph"
- cmd, err := json.Marshal(map[string]interface{}{
- "prefix": "osd dump",
- "format": "json",
- })
- if err != nil {
- log.Errorf("cmd failed with %s\n", err)
- return nil, err
- }
- buf, _, err := cli.conn.MonCommand(cmd)
- if err != nil {
- log.Errorf("unable to collect data from ceph osd perf")
- return nil, err
- }
- osd_dump := &cephOSDDump{}
- if err := json.Unmarshal(buf, osd_dump); err != nil {
- log.Errorf("unmarshal failed")
- return nil, err
- }
- var_label := make(map[string]string)
- var_label["osd"] = fmt.Sprintf("osd.%s", osd_dump.OSDs[0].OSD.String())
- returnMap = append(returnMap,
- CephMetricStats{"osd",
- osd_dump.OSDs[0].OSD.String(),
- "",
- const_label,
- "",
- var_label, ""},
- CephMetricStats{
- "osd_up",
- osd_dump.OSDs[0].Up.String(),
- "",
- const_label,
- "",
- var_label, ""},
- CephMetricStats{
- "osd_in",
- osd_dump.OSDs[0].In.String(),
- "",
- const_label,
- "",
- var_label, ""})
- return returnMap, nil
-}
-
-func (cli *MetricCli) CollectHealthMetrics() ([]CephMetricStats, error) {
- returnMap := []CephMetricStats{}
- constlabel := make(map[string]string)
- constlabel["cluster"] = "ceph"
- health_cmd, err := json.Marshal(map[string]interface{}{
- "prefix": "status",
- "format": "json",
- })
- if err != nil {
- log.Errorf("cmd failed with %s\n", err)
- return nil, err
- }
- buff, _, err := cli.conn.MonCommand(health_cmd)
- if err != nil {
- log.Errorf("unable to collect data from ceph status")
- return nil, err
- }
- health_stats := &cephHealthStats{}
- if err := json.Unmarshal(buff, health_stats); err != nil {
- log.Fatalf("unmarshal error: %v", err)
- return nil, err
- }
-
- returnMap = append(returnMap, CephMetricStats{
- "io_write",
- health_stats.PGMap.WriteOpPerSec.String(),
- "ops", constlabel,
- "",
- nil,
- "client"})
-
- returnMap = append(returnMap, CephMetricStats{
- "io_read",
- health_stats.PGMap.ReadBytePerSec.String(),
- "bytes", constlabel,
- "",
- nil,
- "client"})
-
- returnMap = append(returnMap, CephMetricStats{
- "io_read",
- (health_stats.PGMap.ReadOpPerSec.String() + health_stats.PGMap.WriteOpPerSec.String()),
- "ops",
- constlabel,
- "",
- nil,
- "client"})
- returnMap = append(returnMap, CephMetricStats{
- "io_write",
- health_stats.PGMap.WriteBytePerSec.String(),
- "bytes",
- constlabel,
- "",
- nil,
- "client"})
- returnMap = append(returnMap, CephMetricStats{
- "cache_flush_io",
- health_stats.PGMap.CacheFlushBytePerSec.String(),
- "bytes",
- constlabel,
- "",
- nil,
- ""})
- returnMap = append(returnMap, CephMetricStats{
- "cache_evict_io",
- health_stats.PGMap.CacheEvictBytePerSec.String(),
- "bytes",
- constlabel,
- "",
- nil,
- ""})
-
- returnMap = append(returnMap, CephMetricStats{
- "cache_promote_io",
- health_stats.PGMap.CachePromoteOpPerSec.String(),
- "ops",
- constlabel,
- "",
- nil,
- ""})
-
- returnMap = append(returnMap, CephMetricStats{
- "degraded_objects",
- health_stats.PGMap.DegradedObjects.String(),
- "", constlabel,
- "",
- nil,
- ""})
-
- returnMap = append(returnMap, CephMetricStats{
- "misplaced_objects",
- health_stats.PGMap.MisplacedObjects.String(),
- "",
- constlabel,
- "",
- nil,
- ""})
-
- returnMap = append(returnMap, CephMetricStats{
- "osds",
- health_stats.OSDMap.OSDMap.NumOSDs.String(),
- "",
- constlabel,
- "",
- nil,
- ""})
-
- returnMap = append(returnMap, CephMetricStats{
- "osds_up",
- health_stats.OSDMap.OSDMap.NumUpOSDs.String(),
- "",
- constlabel,
- "",
- nil,
- ""})
-
- returnMap = append(returnMap, CephMetricStats{
- "osds_in",
- health_stats.OSDMap.OSDMap.NumInOSDs.String(),
- "",
- constlabel,
- "",
- nil,
- ""})
-
- returnMap = append(returnMap, CephMetricStats{
- "pgs_remapped",
- health_stats.OSDMap.OSDMap.NumRemappedPGs.String(),
- "", constlabel,
- "",
- nil,
- ""})
-
- returnMap = append(returnMap, CephMetricStats{
- "total_pgs",
- health_stats.PGMap.NumPGs.String(),
- "",
- constlabel,
- "",
- nil,
- ""})
- return returnMap, nil
-}
-
-func (cli *MetricCli) CollectMonitorsMetrics() ([]CephMetricStats, error) {
- var returnMap []CephMetricStats
- returnMap = []CephMetricStats{}
- const_label := make(map[string]string)
- const_label["cluster"] = "ceph"
-
- cmd, err := json.Marshal(map[string]interface{}{
- "prefix": "status",
- "format": "json",
- })
- if err != nil {
- log.Errorf("cmd failed with %s\n", err)
- return nil, err
- }
-
- buf, _, err := cli.conn.MonCommand(cmd)
- if err != nil {
- log.Errorf("unable to collect data from ceph status")
- return nil, err
- }
-
- mon_stats := &cephMonitorStats{}
- if err := json.Unmarshal(buf, mon_stats); err != nil {
- log.Fatalf("unmarshal error: %v", err)
- return nil, err
- }
-
- for _, healthService := range mon_stats.Health.Health.HealthServices {
- for _, monstat := range healthService.Mons {
- var_label := make(map[string]string)
- var_label["monitor"] = monstat.Name
- kbTotal, _ := monstat.KBTotal.Float64()
- bytesTotal_val := fmt.Sprintf("%f", kbTotal*1e3)
- returnMap = append(returnMap, CephMetricStats{
- "capacity",
- bytesTotal_val,
- "bytes", const_label,
- "",
- var_label,
- ""})
- kbUsed, _ := monstat.KBUsed.Float64()
- bytesUsed_val := fmt.Sprintf("%f", kbUsed*1e3)
- returnMap = append(returnMap, CephMetricStats{
- "used",
- bytesUsed_val,
- "bytes", const_label,
- "",
- var_label,
- ""})
- kbAvail, _ := monstat.KBAvail.Float64()
- bytesAvail_val := fmt.Sprintf("%f", kbAvail*1e3)
- returnMap = append(returnMap, CephMetricStats{
- "avail",
- bytesAvail_val,
- "bytes", const_label,
- "",
- var_label,
- ""})
- returnMap = append(returnMap, CephMetricStats{
- "avail_percent",
- monstat.AvailPercent.String(),
- "", const_label,
- "",
- var_label,
- ""})
- returnMap = append(returnMap, CephMetricStats{
- "store_capacity",
- monstat.StoreStats.BytesTotal.String(),
- "bytes", const_label,
- "",
- var_label,
- ""})
- returnMap = append(returnMap, CephMetricStats{
- "store_sst",
- monstat.StoreStats.BytesSST.String(),
- "", const_label,
- "bytes",
- var_label,
- ""})
- returnMap = append(returnMap, CephMetricStats{
- "store_log",
- monstat.StoreStats.BytesLog.String(),
- "bytes", const_label,
- "",
- var_label,
- ""})
- returnMap = append(returnMap, CephMetricStats{
- "store_misc",
- monstat.StoreStats.BytesMisc.String(),
- "bytes", const_label,
- "",
- var_label,
- ""})
- }
- }
-
- cmd, err = json.Marshal(map[string]interface{}{
- "prefix": "time-sync-status",
- "format": "json",
- })
- if err != nil {
- log.Errorf("cmd failed with %s\n", err)
- return nil, err
- }
-
- buf, _, err = cli.conn.MonCommand(cmd)
- if err != nil {
- log.Errorf("unable to collect data from ceph time-sync-status")
- return nil, err
- }
-
- timeStats := &cephTimeSyncStatus{}
- if err := json.Unmarshal(buf, mon_stats); err != nil {
- log.Fatalf("unmarshal error: %v", err)
- return nil, err
- }
-
- for monNode, tstat := range timeStats.TimeChecks {
- var_label := make(map[string]string)
- var_label["monitor"] = monNode
- returnMap = append(returnMap, CephMetricStats{
- "clock_skew",
- tstat.Skew.String(),
- "seconds",
- const_label,
- "",
- var_label,
- ""})
- returnMap = append(returnMap, CephMetricStats{
- "latency",
- tstat.Latency.String(),
- "seconds",
- const_label,
- "",
- var_label,
- ""})
- returnMap = append(returnMap, CephMetricStats{
- "quorum_count",
- fmt.Sprintf("%v", mon_stats.Quorum),
- "", const_label,
- "",
- var_label,
- ""})
- }
- return returnMap, nil
-}
-
-func (c *MetricCli) execute(cmd ...string) (string, error) {
- return c.RootExecuter.Run(cmd[0], cmd[1:]...)
-}
-func (cli *MetricCli) CollectVolumeMetrics() ([]CephMetricStats, error) {
- var returnMap []CephMetricStats
- returnMap = []CephMetricStats{}
- const_label := make(map[string]string)
- const_label["cluster"] = "ceph"
- cmd := []string{"env", "rbd", "ls"}
- out, err := cli.execute(cmd...)
- if err != nil {
- log.Errorf("cmd.Run() failed with %s\n", err)
- err = nil
-
- }
- result := strings.Split(string(out), "\n")
-
- for i := 0; i < len(result); i++ {
- if result[i] != "" {
- command := []string{"env", "rbd", "info", result[i], "--format", "json"}
- command_out, _ := cli.execute(command...)
- command_output := strings.Split(string(command_out), ",")
- var output []string
- for j := 0; j < (len(command_output)); j++ {
- result := strings.Split(command_output[j], ":")
- output = append(output, result[1])
-
- }
-
- returnMap = append(returnMap, CephMetricStats{"name",
- result[i],
- "", const_label,
- "", nil,
- "volume"})
- returnMap = append(returnMap, CephMetricStats{"size",
- output[1],
- "bytes", const_label,
- "", nil,
- "volume"})
- returnMap = append(returnMap, CephMetricStats{"objects",
- output[2],
- "", const_label,
- "", nil,
- "volume"})
- returnMap = append(returnMap, CephMetricStats{"object_size",
- output[4],
- "bytes", const_label,
- "", nil,
- "volume"})
- }
- }
-
- return returnMap, nil
-}
-
-func (cli *MetricCli) CollectMetrics() ([]CephMetricStats, []string, error) {
- returnMap := []CephMetricStats{}
- var instance []string
- instanceID, _ := cli.conn.GetFSID()
- instance = append(instance, instanceID)
- instanceName := fmt.Sprintf("%s%s", "opensds-ceph-", instance[0])
- instance = append(instance, instanceName)
- // Collecting Pool Metrics
- pool_metric, _ := cli.CollectPoolMetrics()
- for i := range pool_metric {
- returnMap = append(returnMap, pool_metric[i])
- }
- // Collects Cluster Metrics
- cluster_metric, _ := cli.CollectClusterMetrics()
- for i := range cluster_metric {
- returnMap = append(returnMap, cluster_metric[i])
- }
- // Collects Performance Metrics
- perf_metric, _ := cli.CollectPerfMetrics()
- for i := range perf_metric {
- returnMap = append(returnMap, perf_metric[i])
- }
- // Collects OSD Metrics
- osd_df_metric, _ := cli.CollectOsddfMetrics()
- for i := range osd_df_metric {
- returnMap = append(returnMap, osd_df_metric[i])
- }
- // Collects OSD Dump Metrics
- osd_dump_metric, _ := cli.CollectOsddumpMetrics()
- for i := range osd_dump_metric {
- returnMap = append(returnMap, osd_dump_metric[i])
- }
- // Collects Ceph Health Metrics
- health_metrics, _ := cli.CollectHealthMetrics()
- for i := range health_metrics {
- returnMap = append(returnMap, health_metrics[i])
- }
- // Collects Ceph Monitor Metrics
- monitor_metrics, _ := cli.CollectMonitorsMetrics()
- for i := range monitor_metrics {
- returnMap = append(returnMap, monitor_metrics[i])
- }
- // Collects Ceph Volume Metrics
- volume_metrics, _ := cli.CollectVolumeMetrics()
- for i := range volume_metrics {
- returnMap = append(returnMap, volume_metrics[i])
- }
- return returnMap, instance, nil
-}
diff --git a/contrib/drivers/drbd/consts.go b/contrib/drivers/drbd/consts.go
deleted file mode 100644
index f2ca0cc77..000000000
--- a/contrib/drivers/drbd/consts.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package drbd
-
-import (
- "path/filepath"
-
- "github.com/LINBIT/godrbdutils"
-)
-
-// Probably make some of these configurable later
-const (
- portPostfix = "-drbd-port"
- minorPostfix = "-drbd-minor"
-
- defaultPortMin = 7000
- defaultPortMax = 8000
-
- minorMin = 1
- minorMax = 1000
-
- // for the time being opensds only has one primary and one secondary,
- // but reserve slots for 7 peers anyways
- maxPeers = 7
-
- resDir = "/etc/drbd.d"
- defaultConfPath = "/etc/opensds/driver/drbd.yaml"
-)
-
-type drbdConf struct {
- Hosts []godrbdutils.Host `yaml:"Hosts,omitempty"`
- PortMin int `yaml:"PortMin,omitempty"`
- PortMax int `yaml:"PortMax,omitempty"`
-}
-
-func portKey(s string) string { return s + portPostfix }
-func minorKey(s string) string { return s + minorPostfix }
-
-func resFilePath(resName string) string {
- return filepath.Join(resDir, resName) + ".res"
-}
-
-func cfgOrDefault(c, d int) int {
- if c > 0 {
- return c
- }
- return d
-}
diff --git a/contrib/drivers/drbd/replication.go b/contrib/drivers/drbd/replication.go
deleted file mode 100644
index 84b22e35d..000000000
--- a/contrib/drivers/drbd/replication.go
+++ /dev/null
@@ -1,243 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package drbd
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "strconv"
-
- "github.com/LINBIT/godrbdutils"
- log "github.com/golang/glog"
- "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
-)
-
-// ReplicationDriver
-type ReplicationDriver struct{}
-
-// Setup
-func (r *ReplicationDriver) Setup() error { return nil }
-
-// Unset
-func (r *ReplicationDriver) Unset() error { return nil }
-
-// CreateReplication
-func (r *ReplicationDriver) CreateReplication(opt *pb.CreateReplicationOpts) (*model.ReplicationSpec, error) {
- log.Infof("DRBD create replication ....")
-
- conf := drbdConf{}
- _, err := config.Parse(&conf, defaultConfPath)
- if err != nil {
- return nil, err
- }
- if len(conf.Hosts) != 2 {
- return nil, fmt.Errorf("Your configuration does not contain exactly 2 hosts")
- }
-
- isPrimary := opt.GetIsPrimary()
- primaryData := opt.GetPrimaryReplicationDriverData()
- secondaryData := opt.GetSecondaryReplicationDriverData()
-
- var myData *map[string]string
- if isPrimary {
- myData = &primaryData
- } else {
- myData = &secondaryData
- }
-
- var myHostName string
- var myHostIP string
- var hok bool
- if myHostName, hok = (*myData)["HostName"]; hok {
- myHostIP, hok = (*myData)["HostIp"]
- }
- if !hok {
- return nil, fmt.Errorf("Data did not contain 'HostIp' or 'HostName' key")
- }
-
- var myHost, peerHost godrbdutils.Host
- for _, h := range conf.Hosts {
- if h.Name == myHostName && h.IP == myHostIP {
- myHost = h
- } else {
- peerHost = h
- }
- }
-
- if myHost.Name == "" || myHost.IP == "" || peerHost.Name == "" || peerHost.IP == "" ||
- myHost.Name == peerHost.Name || myHost.ID == peerHost.ID || myHost.IP == peerHost.IP {
- return nil, fmt.Errorf("Could not find valid hosts")
- }
-
- resName := opt.GetId()
-
- primaryVolID := opt.GetPrimaryVolumeId()
- secondaryVolID := opt.GetSecondaryVolumeId()
- path, _ := filepath.EvalSymlinks(primaryData["Mountpoint"])
- primaryBackingDevice, _ := filepath.Abs(path)
- path, _ = filepath.EvalSymlinks(secondaryData["Mountpoint"])
- secondaryBackingDevice, _ := filepath.Abs(path)
- log.Info(primaryBackingDevice, secondaryBackingDevice)
- // as we use the same minors/ports in primary/secondary, make them a set:
- usedPort := make(map[int]bool)
- usedMinor := make(map[int]bool)
- for _, volData := range opt.GetVolumeDataList() {
- data := volData.GetData()
-
- // check if the current device in the DataList() already has a key for port/minor that belongs to the primary/secondary
- // That would happen for example if the data was not deleted and/or CreateReplication() was called multiple times.
- // if val, ok := data[portKey(primaryVolID)]; ok {
- // return nil, fmt.Errorf("Primary Volume ID (%s), already has a port number (%s)", primaryVolID, val)
- // }
- // if val, ok := data[portKey(secondaryVolID)]; ok {
- // return nil, fmt.Errorf("Secondary Volume ID (%s), already has a port number (%s)", secondaryVolID, val)
- // }
- // if val, ok := data[minorKey(primaryVolID)]; ok {
- // return nil, fmt.Errorf("Primary Volume ID (%s), already has a minor number (%s)", primaryVolID, val)
- // }
- // if val, ok := data[minorKey(secondaryVolID)]; ok {
- // return nil, fmt.Errorf("Secondary Volume ID (%s), already has a minor number (%s)", secondaryVolID, val)
- // }
-
- // get ports and minors still in use
- volID := data["VolumeId"]
- if val, ok := data[portKey(volID)]; ok {
- p, err := strconv.Atoi(val)
- if err != nil {
- return nil, err
- }
- usedPort[p] = true
- }
- if val, ok := data[minorKey(volID)]; ok {
- m, err := strconv.Atoi(val)
- if err != nil {
- return nil, err
- }
- usedMinor[m] = true
- }
- }
-
- portMin := cfgOrDefault(conf.PortMin, defaultPortMin)
- portMax := cfgOrDefault(conf.PortMax, defaultPortMax)
- var up []int
- for k := range usedPort {
- up = append(up, k)
- }
- port, err := godrbdutils.GetNumber(portMin, portMax, up)
- if err != nil {
- return nil, err
- }
-
- var um []int
- for k := range usedMinor {
- um = append(um, k)
- }
- minor, err := godrbdutils.GetNumber(minorMin, minorMax, um)
- if err != nil {
- return nil, err
- }
-
- res := godrbdutils.NewResource(resName, port)
-
- res.AddHost(myHost.ID, myHost.Name, myHost.IP)
- res.AddHost(peerHost.ID, peerHost.Name, peerHost.IP)
-
- drbdVolID := 0 // currently only one volume per DRBD resource
- if isPrimary {
- res.AddVolume(drbdVolID, minor, primaryBackingDevice, myHost.Name)
- res.AddVolume(drbdVolID, minor, secondaryBackingDevice, peerHost.Name)
- } else {
- res.AddVolume(drbdVolID, minor, primaryBackingDevice, peerHost.Name)
- res.AddVolume(drbdVolID, minor, secondaryBackingDevice, myHost.Name)
- }
-
- res.WriteConfig(resFilePath(resName))
-
- // Bring up the resource
- drbdadm := godrbdutils.NewDrbdAdm([]string{resName})
-
- drbdadm.CreateMetaData(fmt.Sprintf("--max-peers=%d", maxPeers), "--force")
- drbdadm.Up()
-
- if isPrimary {
- // start initial sync
- drbdadm.Primary("--force")
- drbdadm.Secondary() // switch back, rest done by auto promote
- }
-
- additionalPrimaryData := map[string]string{
- portKey(primaryVolID): strconv.Itoa(port),
- minorKey(primaryVolID): strconv.Itoa(minor),
- }
-
- additionalSecondaryData := map[string]string{
- portKey(secondaryVolID): strconv.Itoa(port),
- minorKey(secondaryVolID): strconv.Itoa(minor),
- }
-
- return &model.ReplicationSpec{
- // TODO(rck): return additional important information
- PrimaryReplicationDriverData: additionalPrimaryData,
- SecondaryReplicationDriverData: additionalSecondaryData,
- }, nil
-}
-
-func (r *ReplicationDriver) DeleteReplication(opt *pb.DeleteReplicationOpts) error {
- log.Infof("DRBD delete replication ....")
-
- resName := opt.GetId()
-
- drbdadm := godrbdutils.NewDrbdAdm([]string{resName})
- if _, err := drbdadm.Down(); err != nil {
- return err
- }
- if err := os.Remove(resFilePath(resName)); err != nil {
- return err
- }
-
- // reserved minor/port are automatically deleted because they are gone from *ReplicationData
-
- return nil
-}
-
-func (r *ReplicationDriver) EnableReplication(opt *pb.EnableReplicationOpts) error {
- log.Infof("DRBD enable replication ....")
-
- drbdadm := godrbdutils.NewDrbdAdm([]string{opt.GetId()})
- _, err := drbdadm.Adjust()
- return err
-}
-
-func (r *ReplicationDriver) DisableReplication(opt *pb.DisableReplicationOpts) error {
- log.Infof("DRBD disable replication ....")
-
- drbdadm := godrbdutils.NewDrbdAdm([]string{opt.GetId()})
- _, err := drbdadm.Disconnect()
- return err
-}
-
-func (r *ReplicationDriver) FailoverReplication(opt *pb.FailoverReplicationOpts) error {
- log.Infof("DRBD failover replication ....")
- // nothing to do here:
- // The driver returns a block device on both nodes (/dev/drbd$minor and a symlink as /dev/drbd/by-res/$resname)
- // And the driver makes sure that it triggeres an initial sync from the primary to the secondary side
- // Then:
- // When the device is used (open(2) in RW mode, it switches that side to DRBD Primary. That is the "autopromote" feature of DRBD9
- // That happens what ever the user does (use it as raw device with 'dd', or put a file system on it and mount it,...)
- // After the user finished using the device (e.g., umount), the device switches to DRBD Secondary
- // And it can then be used on the second node by just open(2)ing the device again.
- return nil
-}
diff --git a/contrib/drivers/drivers.go b/contrib/drivers/drivers.go
deleted file mode 100755
index 0ea11e3c5..000000000
--- a/contrib/drivers/drivers.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module defines an standard table of storage driver. The default storage
-driver is sample driver used for testing. If you want to use other storage
-plugin, just modify Init() and Clean() method.
-
-*/
-
-package drivers
-
-import (
- _ "github.com/opensds/opensds/contrib/backup/multicloud"
- "github.com/opensds/opensds/contrib/drivers/ceph"
- "github.com/opensds/opensds/contrib/drivers/fujitsu/eternus"
- "github.com/opensds/opensds/contrib/drivers/hpe/nimble"
- "github.com/opensds/opensds/contrib/drivers/huawei/fusionstorage"
- "github.com/opensds/opensds/contrib/drivers/huawei/oceanstor"
- "github.com/opensds/opensds/contrib/drivers/ibm/spectrumscale"
- "github.com/opensds/opensds/contrib/drivers/lvm"
- "github.com/opensds/opensds/contrib/drivers/netapp/ontap"
- "github.com/opensds/opensds/contrib/drivers/openstack/cinder"
- "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- sample "github.com/opensds/opensds/testutils/driver"
-)
-
-// VolumeDriver is an interface for exposing some operations of different volume
-// drivers, currently support sample, lvm, ceph, cinder and so forth.
-type VolumeDriver interface {
- //Any initialization the volume driver does while starting.
- Setup() error
- //Any operation the volume driver does while stopping.
- Unset() error
-
- CreateVolume(opt *pb.CreateVolumeOpts) (*model.VolumeSpec, error)
-
- PullVolume(volIdentifier string) (*model.VolumeSpec, error)
-
- DeleteVolume(opt *pb.DeleteVolumeOpts) error
-
- ExtendVolume(opt *pb.ExtendVolumeOpts) (*model.VolumeSpec, error)
-
- InitializeConnection(opt *pb.CreateVolumeAttachmentOpts) (*model.ConnectionInfo, error)
-
- TerminateConnection(opt *pb.DeleteVolumeAttachmentOpts) error
-
- CreateSnapshot(opt *pb.CreateVolumeSnapshotOpts) (*model.VolumeSnapshotSpec, error)
-
- PullSnapshot(snapIdentifier string) (*model.VolumeSnapshotSpec, error)
-
- DeleteSnapshot(opt *pb.DeleteVolumeSnapshotOpts) error
-
- InitializeSnapshotConnection(opt *pb.CreateSnapshotAttachmentOpts) (*model.ConnectionInfo, error)
-
- TerminateSnapshotConnection(opt *pb.DeleteSnapshotAttachmentOpts) error
-
- // NOTE Parameter vg means complete volume group information, because driver
- // may use it to do something and return volume group status.
- CreateVolumeGroup(opt *pb.CreateVolumeGroupOpts) (*model.VolumeGroupSpec, error)
-
- // NOTE Parameter addVolumesRef or removeVolumesRef means complete volume
- // information that will be added or removed from group. Driver may use
- // them to do some related operations and return their status.
- UpdateVolumeGroup(opt *pb.UpdateVolumeGroupOpts) (*model.VolumeGroupSpec, error)
-
- // NOTE Parameter volumes means volumes deleted from group, driver may use
- // their compelete information to do some related operations and return
- // their status.
- DeleteVolumeGroup(opt *pb.DeleteVolumeGroupOpts) error
-
- ListPools() ([]*model.StoragePoolSpec, error)
-}
-
-// Init
-func Init(resourceType string) VolumeDriver {
- var d VolumeDriver
- switch resourceType {
- case config.CinderDriverType:
- d = &cinder.Driver{}
- break
- case config.CephDriverType:
- d = &ceph.Driver{}
- break
- case config.LVMDriverType:
- d = &lvm.Driver{}
- break
- case config.IBMSpectrumScaleDriverType:
- d = &spectrumscale.Driver{}
- break
- case config.HuaweiOceanStorBlockDriverType:
- d = &oceanstor.Driver{}
- break
- case config.HuaweiFusionStorageDriverType:
- d = &fusionstorage.Driver{}
- case config.HpeNimbleDriverType:
- d = &nimble.Driver{}
- break
- case config.FujitsuEternusDriverType:
- d = &eternus.Driver{}
- break
- case config.NetappOntapSanDriverType:
- d = &ontap.SANDriver{}
- break
- default:
- d = &sample.Driver{}
- break
- }
- d.Setup()
- return d
-}
-
-// Clean
-func Clean(d VolumeDriver) VolumeDriver {
- // Execute different clean operations according to the VolumeDriver type.
- switch d.(type) {
- case *cinder.Driver:
- break
- case *ceph.Driver:
- break
- case *lvm.Driver:
- break
- case *spectrumscale.Driver:
- break
- case *oceanstor.Driver:
- break
- case *fusionstorage.Driver:
- break
- case *nimble.Driver:
- break
- case *eternus.Driver:
- break
- case *ontap.SANDriver:
- break
- default:
- break
- }
- d.Unset()
- d = nil
-
- return d
-}
-
-func CleanMetricDriver(d MetricDriver) MetricDriver {
- // Execute different clean operations according to the MetricDriver type.
- switch d.(type) {
- case *lvm.MetricDriver:
- break
- default:
- break
- }
- _ = d.Teardown()
- d = nil
-
- return d
-}
-
-type MetricDriver interface {
- //Any initialization the metric driver does while starting.
- Setup() error
- //Any operation the metric driver does while stopping.
- Teardown() error
- // Collect metrics for all supported resources
- CollectMetrics() ([]*model.MetricSpec, error)
-}
-
-// Init
-func InitMetricDriver(resourceType string) MetricDriver {
- var d MetricDriver
- switch resourceType {
- case config.LVMDriverType:
- d = &lvm.MetricDriver{}
- break
- case config.CephDriverType:
- d = &ceph.MetricDriver{}
- break
- case config.HuaweiOceanStorBlockDriverType:
- d = &oceanstor.MetricDriver{}
- break
- default:
- //d = &sample.Driver{}
- break
- }
- d.Setup()
- return d
-}
diff --git a/contrib/drivers/drivers_test.go b/contrib/drivers/drivers_test.go
deleted file mode 100755
index 20f77154a..000000000
--- a/contrib/drivers/drivers_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package drivers
-
-import (
- "reflect"
- "testing"
-
- "github.com/opensds/opensds/contrib/drivers/ceph"
- "github.com/opensds/opensds/contrib/drivers/lvm"
- "github.com/opensds/opensds/contrib/drivers/openstack/cinder"
- sample "github.com/opensds/opensds/testutils/driver"
-)
-
-func TestInit(t *testing.T) {
- var rsList = []string{"others"}
- var expectedVd = []VolumeDriver{&sample.Driver{}}
-
- for i, rs := range rsList {
- if vp := Init(rs); !reflect.DeepEqual(vp, expectedVd[i]) {
- t.Errorf("Expected %v, got %v\n", expectedVd, vp)
- }
- }
-}
-
-func TestClean(t *testing.T) {
- var driverList = []VolumeDriver{
- &ceph.Driver{},
- &lvm.Driver{},
- &cinder.Driver{},
- &sample.Driver{},
- }
-
- for _, driver := range driverList {
- if d := Clean(driver); !reflect.DeepEqual(d, nil) {
- t.Errorf("Expected %v, got %v\n", nil, d)
- }
- }
-}
diff --git a/contrib/drivers/filesharedrivers/chubaofs/chubaofs.go b/contrib/drivers/filesharedrivers/chubaofs/chubaofs.go
deleted file mode 100644
index 7f317f38d..000000000
--- a/contrib/drivers/filesharedrivers/chubaofs/chubaofs.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright (c) 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chubaofs
-
-import (
- "errors"
- "fmt"
- "os"
- "path"
-
- log "github.com/golang/glog"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- . "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils/config"
- uuid "github.com/satori/go.uuid"
-)
-
-const (
- DefaultConfPath = "/etc/opensds/driver/chubaofs.yaml"
- NamePrefix = "chubaofs"
-)
-
-const (
- KMountPoint = "mountPoint"
- KVolumeName = "volName"
- KMasterAddr = "masterAddr"
- KLogDir = "logDir"
- KWarnLogDir = "warnLogDir"
- KLogLevel = "logLevel"
- KOwner = "owner"
- KProfPort = "profPort"
-)
-
-const (
- KClientPath = "clientPath"
-)
-
-const (
- defaultLogLevel = "error"
- defaultOwner = "chubaofs"
- defaultProfPort = "10094"
-
- defaultVolumeCapLimit int64 = 1000000
-)
-
-const (
- clientConfigFileName = "client.json"
- clientCmdName = "cfs-client"
-)
-
-type ClusterInfo struct {
- Name string `yaml:"name"`
- MasterAddr []string `yaml:"masterAddr"`
- VolumeCapLimit int64 `yaml:"volumeCapLimit"`
-}
-
-type RuntimeEnv struct {
- MntPoint string `yaml:"mntPoint"`
- ClientPath string `yaml:"clientPath"`
- LogLevel string `yaml:"logLevel"`
- Owner string `yaml:"owner"`
- ProfPort string `yaml:"profPort"`
-}
-
-type Config struct {
- ClusterInfo `yaml:"clusterInfo"`
- RuntimeEnv `yaml:"runtimeEnv"`
-
- Pool map[string]PoolProperties `yaml:"pool,flow"`
-}
-
-type Driver struct {
- conf *Config
-}
-
-func (d *Driver) Setup() error {
- conf := &Config{}
- path := config.CONF.OsdsDock.Backends.Chubaofs.ConfigPath
- if "" == path {
- path = DefaultConfPath
- }
-
- if _, err := Parse(conf, path); err != nil {
- return err
- }
-
- if conf.MntPoint == "" || conf.ClientPath == "" {
- return errors.New(fmt.Sprintf("chubaofs: lack of necessary config, mntPoint(%v) clientPath(%v)", conf.MntPoint, conf.ClientPath))
- }
-
- if conf.VolumeCapLimit <= 0 {
- conf.VolumeCapLimit = defaultVolumeCapLimit
- }
-
- d.conf = conf
- return nil
-}
-
-func (d *Driver) Unset() error {
- return nil
-}
-
-func (d *Driver) CreateFileShare(opt *pb.CreateFileShareOpts) (fshare *FileShareSpec, err error) {
- log.Info("CreateFileShare ...")
-
- volName := opt.GetId()
- volSize := opt.GetSize()
-
- configFiles, fsMntPoints, owner, err := prepareConfigFiles(d, opt)
- if err != nil {
- return nil, err
- }
-
- /*
- * Only the master raft leader can repsonse to create volume requests.
- */
- leader, err := getClusterInfo(d.conf.MasterAddr[0])
- if err != nil {
- return nil, err
- }
-
- err = createOrDeleteVolume(createVolumeRequest, leader, volName, owner, volSize)
- if err != nil {
- return nil, err
- }
-
- err = doMount(clientCmdName, configFiles)
- if err != nil {
- doUmount(fsMntPoints)
- createOrDeleteVolume(deleteVolumeRequest, leader, volName, owner, 0)
- return nil, err
- }
-
- log.Infof("Start client daemon successful: volume name: %v", volName)
-
- fshare = &FileShareSpec{
- BaseModel: &BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- AvailabilityZone: opt.GetAvailabilityZone(),
- PoolId: opt.GetPoolId(),
- ExportLocations: fsMntPoints,
- Metadata: map[string]string{
- KVolumeName: volName,
- KClientPath: d.conf.ClientPath,
- KOwner: owner,
- },
- }
-
- return fshare, nil
-}
-
-func (d *Driver) DeleteFileShare(opts *pb.DeleteFileShareOpts) error {
- volName := opts.GetMetadata()[KVolumeName]
- clientPath := opts.GetMetadata()[KClientPath]
- owner := opts.GetMetadata()[KOwner]
- fsMntPoints := make([]string, 0)
- fsMntPoints = append(fsMntPoints, opts.ExportLocations...)
-
- /*
- * Umount export locations
- */
- err := doUmount(fsMntPoints)
- if err != nil {
- return err
- }
-
- /*
- * Remove generated mount points dir
- */
- for _, mnt := range fsMntPoints {
- err = os.RemoveAll(mnt)
- if err != nil {
- return errors.New(fmt.Sprintf("chubaofs: failed to remove export locations, err: %v", err))
- }
- }
-
- /*
- * Remove generated client runtime path
- */
- err = os.RemoveAll(path.Join(clientPath, volName))
- if err != nil {
- return errors.New(fmt.Sprintf("chubaofs: failed to remove client path %v , volume name: %v , err: %v", clientPath, volName, err))
- }
-
- /*
- * Only the master raft leader can repsonse to delete volume requests.
- */
- leader, err := getClusterInfo(d.conf.MasterAddr[0])
- if err != nil {
- return err
- }
- err = createOrDeleteVolume(deleteVolumeRequest, leader, volName, owner, 0)
- return err
-}
-
-func (d *Driver) CreateFileShareSnapshot(opts *pb.CreateFileShareSnapshotOpts) (*FileShareSnapshotSpec, error) {
- return nil, &NotImplementError{"CreateFileShareSnapshot not implemented yet"}
-}
-
-func (d *Driver) DeleteFileShareSnapshot(opts *pb.DeleteFileShareSnapshotOpts) error {
- return &NotImplementError{"DeleteFileShareSnapshot not implemented yet"}
-}
-
-func (d *Driver) CreateFileShareAcl(opts *pb.CreateFileShareAclOpts) (*FileShareAclSpec, error) {
- return nil, &NotImplementError{"CreateFileShareAcl not implemented yet"}
-}
-
-func (d *Driver) DeleteFileShareAcl(opts *pb.DeleteFileShareAclOpts) error {
- return &NotImplementError{"DeleteFileShareAcl not implemented yet"}
-}
-
-func (d *Driver) ListPools() ([]*StoragePoolSpec, error) {
- pools := make([]*StoragePoolSpec, 0)
- for name, prop := range d.conf.Pool {
- pool := &StoragePoolSpec{
- BaseModel: &BaseModel{
- Id: uuid.NewV5(uuid.NamespaceOID, name).String(),
- },
- Name: name,
- TotalCapacity: d.conf.VolumeCapLimit,
- FreeCapacity: d.conf.VolumeCapLimit,
- StorageType: prop.StorageType,
- Extras: prop.Extras,
- AvailabilityZone: prop.AvailabilityZone,
- }
- if pool.AvailabilityZone == "" {
- pool.AvailabilityZone = "default"
- }
- pools = append(pools, pool)
- }
- return pools, nil
-}
diff --git a/contrib/drivers/filesharedrivers/chubaofs/util.go b/contrib/drivers/filesharedrivers/chubaofs/util.go
deleted file mode 100644
index 499491a72..000000000
--- a/contrib/drivers/filesharedrivers/chubaofs/util.go
+++ /dev/null
@@ -1,343 +0,0 @@
-package chubaofs
-
-import (
- "crypto/md5"
- "encoding/hex"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "strconv"
- "strings"
-
- log "github.com/golang/glog"
- pb "github.com/opensds/opensds/pkg/model/proto"
-)
-
-type RequestType int
-
-func (t RequestType) String() string {
- switch t {
- case createVolumeRequest:
- return "CreateVolume"
- case deleteVolumeRequest:
- return "DeleteVolume"
- default:
- }
- return "N/A"
-}
-
-const (
- createVolumeRequest RequestType = iota
- deleteVolumeRequest
-)
-
-type clusterInfoResponseData struct {
- LeaderAddr string `json:"LeaderAddr"`
-}
-
-type clusterInfoResponse struct {
- Code int `json:"code"`
- Msg string `json:"msg"`
- Data *clusterInfoResponseData `json:"data"`
-}
-
-// Create and Delete Volume Response
-type generalVolumeResponse struct {
- Code int `json:"code"`
- Msg string `json:"msg"`
- Data string `json:"data"`
-}
-
-/*
- * This functions sends http request to the on-premise cluster to
- * get cluster info.
- */
-func getClusterInfo(host string) (string, error) {
- url := "http://" + host + "/admin/getCluster"
- log.Infof("chubaofs: GetClusterInfo(%v)", url)
-
- httpResp, err := http.Get(url)
- if err != nil {
- log.Errorf("chubaofs: failed to GetClusterInfo, url(%v) err(%v)", url, err)
- return "", err
- }
- defer httpResp.Body.Close()
-
- body, err := ioutil.ReadAll(httpResp.Body)
- if err != nil {
- log.Errorf("chubaofs: failed to read response, url(%v) err(%v)", url, err)
- return "", err
- }
-
- resp := &clusterInfoResponse{}
- if err = json.Unmarshal(body, resp); err != nil {
- errmsg := fmt.Sprintf("chubaofs: getClusterInf failed to unmarshal, bodyLen(%d) err(%v)", len(body), err)
- log.Error(errmsg)
- return "", errors.New(errmsg)
- }
-
- log.Infof("chubaofs: GetClusterInfo, url(%v), resp(%v)", url, resp)
-
- if resp.Code != 0 {
- errmsg := fmt.Sprintf("chubaofs: GetClusterInfo code NOK, url(%v) code(%v) msg(%v)", url, resp.Code, resp.Msg)
- log.Error(errmsg)
- return "", errors.New(errmsg)
- }
-
- if resp.Data == nil {
- errmsg := fmt.Sprintf("chubaofs: GetClusterInfo nil data, url(%v) code(%v) msg(%v)", url, resp.Code, resp.Msg)
- log.Error(errmsg)
- return "", errors.New(errmsg)
- }
-
- return resp.Data.LeaderAddr, nil
-}
-
-/*
- * This function sends http request to the on-premise cluster to create
- * or delete a volume according to request type.
- */
-func createOrDeleteVolume(req RequestType, leader, name, owner string, size int64) error {
- var url string
-
- switch req {
- case createVolumeRequest:
- sizeInGB := size
- url = fmt.Sprintf("http://%s/admin/createVol?name=%s&capacity=%v&owner=%v", leader, name, sizeInGB, owner)
- case deleteVolumeRequest:
- key := md5.New()
- if _, err := key.Write([]byte(owner)); err != nil {
- return errors.New(fmt.Sprintf("chubaofs: failed to get md5 sum of owner err(%v)", err))
- }
- url = fmt.Sprintf("http://%s/vol/delete?name=%s&authKey=%v", leader, name, hex.EncodeToString(key.Sum(nil)))
- default:
- return errors.New("chubaofs: request type not recognized! %v")
- }
-
- log.Infof("chubaofs: %v url(%v)", req, url)
-
- httpResp, err := http.Get(url)
- if err != nil {
- errmsg := fmt.Sprintf("chubaofs: %v failed, url(%v) err(%v)", req, url, err)
- return errors.New(errmsg)
- }
- defer httpResp.Body.Close()
-
- body, err := ioutil.ReadAll(httpResp.Body)
- if err != nil {
- errmsg := fmt.Sprintf("chubaofs: %v failed to read http response body, bodyLen(%d) err(%v)", req, len(body), err)
- return errors.New(errmsg)
- }
-
- resp := &generalVolumeResponse{}
- if err := json.Unmarshal(body, resp); err != nil {
- errmsg := fmt.Sprintf("chubaofs: %v failed to unmarshal, url(%v) msg(%v)", req, url, resp.Msg)
- return errors.New(errmsg)
- }
-
- if resp.Code != 0 {
- errmsg := fmt.Sprintf("chubaofs: %v failed, url(%v) code(%v) msg(%v)", req, url, resp.Code, resp.Msg)
- return errors.New(errmsg)
- }
-
- log.Infof("chubaofs: %v url(%v) successful!", req, url)
- return nil
-}
-
-func doMount(cmdName string, confFile []string) error {
- env := []string{
- fmt.Sprintf("PATH=%s", os.Getenv("PATH")),
- }
-
- for _, conf := range confFile {
- cmd := exec.Command(cmdName, "-c", conf)
- cmd.Env = append(cmd.Env, env...)
- if msg, err := cmd.CombinedOutput(); err != nil {
- return errors.New(fmt.Sprintf("chubaofs: failed to start client daemon, msg: %v , err: %v", string(msg), err))
- }
- }
- return nil
-}
-
-func doUmount(mntPoints []string) error {
- env := []string{
- fmt.Sprintf("PATH=%s", os.Getenv("PATH")),
- }
-
- for _, mnt := range mntPoints {
- cmd := exec.Command("umount", mnt)
- cmd.Env = append(cmd.Env, env...)
- msg, err := cmd.CombinedOutput()
- if err != nil {
- return errors.New(fmt.Sprintf("chubaofs: failed to umount, msg: %v , err: %v", msg, err))
- }
- }
- return nil
-}
-
-/*
- * This function creates client config files according to export locations.
- */
-func prepareConfigFiles(d *Driver, opt *pb.CreateFileShareOpts) (configFiles, fsMntPoints []string, owner string, err error) {
- volName := opt.GetId()
- configFiles = make([]string, 0)
- fsMntPoints = make([]string, 0)
-
- /*
- * Check client runtime path.
- */
- fi, err := os.Stat(d.conf.ClientPath)
- if err != nil || !fi.Mode().IsDir() {
- err = errors.New(fmt.Sprintf("chubaofs: invalid client runtime path, path: %v, err: %v", d.conf.ClientPath, err))
- return
- }
-
- clientConf := path.Join(d.conf.ClientPath, volName, "conf")
- clientLog := path.Join(d.conf.ClientPath, volName, "log")
- clientWarnLog := path.Join(d.conf.ClientPath, volName, "warnlog")
-
- if err = os.MkdirAll(clientConf, os.ModeDir); err != nil {
- err = errors.New(fmt.Sprintf("chubaofs: failed to create client config dir, path: %v , err: %v", clientConf, err))
- return
- }
- defer func() {
- if err != nil {
- log.Warningf("chubaofs: cleaning config dir, %v", clientConf)
- os.RemoveAll(clientConf)
- }
- }()
-
- if err = os.MkdirAll(clientLog, os.ModeDir); err != nil {
- err = errors.New(fmt.Sprintf("chubaofs: failed to create client log dir, path: %v", clientLog))
- return
- }
- defer func() {
- if err != nil {
- log.Warningf("chubaofs: cleaning log dir, %v", clientLog)
- os.RemoveAll(clientLog)
- }
- }()
-
- if err = os.MkdirAll(clientWarnLog, os.ModeDir); err != nil {
- err = errors.New(fmt.Sprintf("chubaofs: failed to create client warn log dir, path: %v , err: %v", clientWarnLog, err))
- return
- }
- defer func() {
- if err != nil {
- log.Warningf("chubaofs: cleaning warn log dir, %v", clientWarnLog)
- os.RemoveAll(clientWarnLog)
- }
- }()
-
- /*
- * Check and create mount point directory.
- * Mount point dir has to be newly created.
- */
- locations := make([]string, 0)
- if len(opt.ExportLocations) == 0 {
- locations = append(locations, path.Join(d.conf.MntPoint, volName))
- } else {
- locations = append(locations, opt.ExportLocations...)
- }
-
- /*
- * Mount point has to be absolute path to avoid umounting the current
- * working directory.
- */
- fsMntPoints, err = createAbsMntPoints(locations)
- if err != nil {
- return
- }
-
- defer func() {
- if err != nil {
- for _, mnt := range fsMntPoints {
- os.RemoveAll(mnt)
- }
- }
- }()
-
- /*
- * Generate client mount config file.
- */
- mntConfig := make(map[string]interface{})
- mntConfig[KVolumeName] = volName
- mntConfig[KMasterAddr] = strings.Join(d.conf.MasterAddr, ",")
- mntConfig[KLogDir] = clientLog
- mntConfig[KWarnLogDir] = clientWarnLog
- if d.conf.LogLevel != "" {
- mntConfig[KLogLevel] = d.conf.LogLevel
- } else {
- mntConfig[KLogLevel] = defaultLogLevel
- }
- if d.conf.Owner != "" {
- mntConfig[KOwner] = d.conf.Owner
- } else {
- mntConfig[KOwner] = defaultOwner
- }
- if d.conf.ProfPort != "" {
- mntConfig[KProfPort] = d.conf.ProfPort
- } else {
- mntConfig[KProfPort] = defaultProfPort
- }
-
- owner = mntConfig[KOwner].(string)
-
- for i, mnt := range fsMntPoints {
- mntConfig[KMountPoint] = mnt
- data, e := json.MarshalIndent(mntConfig, "", " ")
- if e != nil {
- err = errors.New(fmt.Sprintf("chubaofs: failed to generate client config file, err(%v)", e))
- return
- }
- filePath := path.Join(clientConf, strconv.Itoa(i), clientConfigFileName)
- _, e = generateFile(filePath, data)
- if e != nil {
- err = errors.New(fmt.Sprintf("chubaofs: failed to generate client config file, err(%v)", e))
- return
- }
- configFiles = append(configFiles, filePath)
- }
-
- return
-}
-
-/*
- * This function creates mount points according to the specified paths,
- * and returns the absolute paths.
- */
-func createAbsMntPoints(locations []string) (mntPoints []string, err error) {
- mntPoints = make([]string, 0)
- for _, loc := range locations {
- mnt, e := filepath.Abs(loc)
- if e != nil {
- err = errors.New(fmt.Sprintf("chubaofs: failed to get absolute path of export locations, loc: %v , err: %v", loc, e))
- return
- }
- if e = os.MkdirAll(mnt, os.ModeDir); e != nil {
- err = errors.New(fmt.Sprintf("chubaofs: failed to create mount point dir, mnt: %v , err: %v", mnt, e))
- return
- }
- mntPoints = append(mntPoints, mnt)
- }
- return
-}
-
-/*
- * This function generates the target file with specified path, and writes data.
- */
-func generateFile(filePath string, data []byte) (int, error) {
- os.MkdirAll(path.Dir(filePath), os.ModePerm)
- fw, err := os.Create(filePath)
- if err != nil {
- return 0, err
- }
- defer fw.Close()
- return fw.Write(data)
-}
diff --git a/contrib/drivers/filesharedrivers/drivers.go b/contrib/drivers/filesharedrivers/drivers.go
deleted file mode 100644
index ae2ce6355..000000000
--- a/contrib/drivers/filesharedrivers/drivers.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module defines an standard table of storage driver. The default storage
-driver is sample driver used for testing. If you want to use other storage
-plugin, just modify Init() and Clean() method.
-*/
-
-package filesharedrivers
-
-import (
- "github.com/opensds/opensds/contrib/drivers/filesharedrivers/chubaofs"
- "github.com/opensds/opensds/contrib/drivers/filesharedrivers/manila"
- nfs "github.com/opensds/opensds/contrib/drivers/filesharedrivers/nfs"
- "github.com/opensds/opensds/contrib/drivers/filesharedrivers/oceanstor"
- "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- sample "github.com/opensds/opensds/testutils/driver"
-)
-
-type FileShareDriver interface {
- //Any initialization the fileshare driver does while starting.
- Setup() error
- //Any operation the fileshare driver does while stopping.
- Unset() error
-
- CreateFileShare(opt *pb.CreateFileShareOpts) (*model.FileShareSpec, error)
-
- DeleteFileShare(opts *pb.DeleteFileShareOpts) error
-
- CreateFileShareSnapshot(opts *pb.CreateFileShareSnapshotOpts) (*model.FileShareSnapshotSpec, error)
-
- DeleteFileShareSnapshot(opts *pb.DeleteFileShareSnapshotOpts) error
-
- CreateFileShareAcl(opt *pb.CreateFileShareAclOpts) (*model.FileShareAclSpec, error)
-
- DeleteFileShareAcl(opt *pb.DeleteFileShareAclOpts) error
-
- ListPools() ([]*model.StoragePoolSpec, error)
-}
-
-// Init
-func Init(resourceType string) FileShareDriver {
- var f FileShareDriver
- switch resourceType {
- case config.NFSDriverType:
- f = &nfs.Driver{}
- break
- case config.HuaweiOceanStorFileDriverType:
- f = &oceanstor.Driver{}
- break
- case config.ManilaDriverType:
- f = &manila.Driver{}
- break
- case config.ChubaofsDriverType:
- f = &chubaofs.Driver{}
- break
- default:
- f = &sample.Driver{}
- break
- }
- f.Setup()
- return f
-}
-
-// Clean
-func Clean(f FileShareDriver) FileShareDriver {
- // Execute different clean operations according to the FileShareDriver type.
- switch f.(type) {
- case *nfs.Driver:
- break
- case *chubaofs.Driver:
- break
- case *sample.Driver:
- break
- default:
- break
- }
- _ = f.Unset()
- f = nil
-
- return f
-}
diff --git a/contrib/drivers/filesharedrivers/manila/manila.go b/contrib/drivers/filesharedrivers/manila/manila.go
deleted file mode 100644
index b5255bcb9..000000000
--- a/contrib/drivers/filesharedrivers/manila/manila.go
+++ /dev/null
@@ -1,465 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements manila driver for OpenSDS. Manila driver will pass
-these operation requests about fileshare to gophercloud which is an OpenStack
-Go SDK.
-*/
-
-package manila
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "strings"
- "time"
-
- log "github.com/golang/glog"
- "github.com/gophercloud/gophercloud"
- "github.com/gophercloud/gophercloud/openstack"
- sharesv2 "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares"
- snapshotsv2 "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots"
- driverConfig "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils/config"
- "github.com/opensds/opensds/pkg/utils/pwd"
- uuid "github.com/satori/go.uuid"
-)
-
-const (
- defaultConfPath = "/etc/opensds/driver/manila.yaml"
- // KManilaShareID is the UUID of the share in mannila.
- KManilaShareID = "manilaShareID"
- // KManilaSnapID is the UUID of the share snapshot in mannila.
- KManilaSnapID = "manilaSnapId"
- // KManilaShareACLID is the UUID of the share acl in mannila.
- KManilaShareACLID = "manilaAclId"
-)
-
-// Driver is a struct of manila backend.
-type Driver struct {
- sharedFileSystemV2 *gophercloud.ServiceClient
- conf *Config
-}
-
-// AuthOptions corresponds to the authentication configuration in manila.yaml
-type AuthOptions struct {
- IdentityEndpoint string `yaml:"endpoint,omitempty"`
- DomainID string `yaml:"domainId,omitempty"`
- DomainName string `yaml:"domainName,omitempty"`
- Username string `yaml:"username,omitempty"`
- Password string `yaml:"password,omitempty"`
- PwdEncrypter string `yaml:"pwdEncrypter,omitempty"`
- EnableEncrypted bool `yaml:"enableEncrypted,omitempty"`
- TenantID string `yaml:"tenantId,omitempty"`
- TenantName string `yaml:"tenantName,omitempty"`
-}
-
-// Config is a struct for parsing manila.yaml
-type Config struct {
- AuthOptions `yaml:"authOptions"`
- Pool map[string]driverConfig.PoolProperties `yaml:"pool,flow"`
-}
-
-// Setup implementation
-func (d *Driver) Setup() error {
- // Read manila config file
- d.conf = &Config{}
- p := config.CONF.OsdsDock.Backends.Manila.ConfigPath
- if "" == p {
- p = defaultConfPath
- }
-
- driverConfig.Parse(d.conf, p)
- var pwdCiphertext = d.conf.Password
- if d.conf.EnableEncrypted {
- // Decrypte the password
- pwdTool := pwd.NewPwdEncrypter(d.conf.PwdEncrypter)
- password, err := pwdTool.Decrypter(pwdCiphertext)
- if err != nil {
- return err
- }
- pwdCiphertext = password
- }
-
- authOpts := gophercloud.AuthOptions{
- IdentityEndpoint: d.conf.IdentityEndpoint,
- DomainID: d.conf.DomainID,
- DomainName: d.conf.DomainName,
- Username: d.conf.Username,
- Password: pwdCiphertext,
- TenantID: d.conf.TenantID,
- TenantName: d.conf.TenantName,
- }
-
- provider, err := openstack.AuthenticatedClient(authOpts)
- if err != nil {
- log.Error("openstack.AuthenticatedClient failed:", err)
- return err
- }
-
- d.sharedFileSystemV2, err = openstack.NewSharedFileSystemV2(provider,
- gophercloud.EndpointOpts{})
- if err != nil {
- log.Error("openstack.NewSharedFileSystemV2 failed:", err)
- return err
- }
-
- log.V(5).Info("setup succeeded\n")
- return nil
-}
-
-// Unset implementation
-func (d *Driver) Unset() error { return nil }
-
-// ListPools implementation
-func (d *Driver) ListPools() ([]*model.StoragePoolSpec, error) {
- // This feature is currently not implemented in gophercloud.
- // See issue: https://github.com/gophercloud/gophercloud/issues/1546
- // "Support Shared File Systems Storage Pools resource #1546"
- var pols []*model.StoragePoolSpec
- poolName := "pool1"
-
- pol := &model.StoragePoolSpec{
- BaseModel: &model.BaseModel{
- Id: uuid.NewV5(uuid.NamespaceOID, poolName).String(),
- },
- Name: poolName,
- TotalCapacity: 100,
- FreeCapacity: 100,
- StorageType: d.conf.Pool[poolName].StorageType,
- AvailabilityZone: d.conf.Pool[poolName].AvailabilityZone,
- Extras: d.conf.Pool[poolName].Extras,
- }
-
- pols = append(pols, pol)
- log.V(5).Infof("function ListPools succeeded, pols:%+v\n", pols)
- return pols, nil
-}
-
-// CreateFileShare implementation
-func (d *Driver) CreateFileShare(opt *pb.CreateFileShareOpts) (*model.FileShareSpec, error) {
- prf := opt.GetProfile()
- shareProto, err := d.GetProtoFromProfile(prf)
- if err != nil {
- return nil, err
- }
-
- // Configure create request body.
- opts := &sharesv2.CreateOpts{
- ShareProto: shareProto,
- Size: int(opt.GetSize()),
- Name: opt.GetName(),
- Description: opt.GetDescription(),
- ShareType: "dhss_false",
- Metadata: opt.GetMetadata(),
- // Manila's default AvailabilityZone is not "default", but ""
- //AvailabilityZone: opt.GetAvailabilityZone(),
- }
-
- share, err := sharesv2.Create(d.sharedFileSystemV2, opts).Extract()
- if err != nil {
- log.Errorf("cannot create share, err:%v, CreateOpts:%+v\n", err, opts)
- return nil, err
- }
-
- log.V(5).Infof("sharesv2.Create succeeded\n")
- // Currently dock framework doesn't support sync data from storage system,
- // therefore, it's necessary to wait for the result of resource's creation.
- // Timout after 10s.
- timeout := time.After(10 * time.Second)
- ticker := time.NewTicker(300 * time.Millisecond)
- done := make(chan bool, 1)
- go func() {
- for {
- select {
- case <-ticker.C:
- tmpShare, err := d.PullFileShare(share.ID)
- if err != nil {
- continue
- }
- if tmpShare.Status != "creating" {
- share.Status = tmpShare.Status
- close(done)
- return
- }
- case <-timeout:
- close(done)
- return
- }
-
- }
- }()
- <-done
-
- d.sharedFileSystemV2.Microversion = "2.14"
- manilaExportLocations, err := sharesv2.GetExportLocations(d.sharedFileSystemV2, share.ID).Extract()
- if err != nil {
- log.Errorf("function GetExportLocations failed, err:%v", err)
- return nil, err
- }
- log.V(5).Infof("sharesv2.GetExportLocations succeeded\n")
-
- var exportLocations []string
- for _, v := range manilaExportLocations {
- exportLocations = append(exportLocations, v.Path)
- }
-
- respShare := model.FileShareSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Protocols: []string{shareProto},
- Description: opt.GetDescription(),
- Size: opt.GetSize(),
- AvailabilityZone: opt.GetAvailabilityZone(),
- PoolId: opt.GetPoolId(),
- Status: share.Status,
- Metadata: map[string]string{KManilaShareID: share.ID},
- ExportLocations: exportLocations,
- }
-
- log.V(5).Infof("function CreateFileShare succeeded, share:%+v\n", respShare)
- return &respShare, nil
-}
-
-// DeleteFileShare implementation
-func (d *Driver) DeleteFileShare(opt *pb.DeleteFileShareOpts) error {
- manilaShareID := opt.Metadata[KManilaShareID]
- if err := sharesv2.Delete(d.sharedFileSystemV2, manilaShareID).ExtractErr(); err != nil {
- log.Error("cannot delete share:", err)
- return err
- }
-
- log.V(5).Info("function DeleteFileShare succeeded\n")
- return nil
-}
-
-// PullFileShare implementation
-func (d *Driver) PullFileShare(ID string) (*model.FileShareSpec, error) {
- share, err := sharesv2.Get(d.sharedFileSystemV2, ID).Extract()
- if err != nil {
- log.Error("cannot get share:", err)
- return nil, err
- }
-
- respShare := model.FileShareSpec{
- BaseModel: &model.BaseModel{
- Id: ID,
- },
- Name: share.Name,
- Description: share.Description,
- Size: int64(share.Size),
- Status: share.Status,
- }
-
- log.V(5).Infof("function PullFileShare succeeded, share:%+v\n", respShare)
- return &respShare, nil
-}
-
-// CreateFileShareAcl implementation
-func (d *Driver) CreateFileShareAcl(opt *pb.CreateFileShareAclOpts) (fshare *model.FileShareAclSpec, err error) {
- var accessLevel string
- accessCapability := opt.GetAccessCapability()
- canRead, canWrite, canExecute := false, false, false
- for _, v := range accessCapability {
- switch strings.ToLower(v) {
- case "read":
- canRead = true
- case "write":
- canWrite = true
- case "execute":
- canExecute = true
- default:
- return nil, errors.New("accessCapability can only be read, write or execute")
- }
- }
-
- switch {
- case canRead && !canWrite && !canExecute:
- accessLevel = "ro"
- case canRead && canWrite && !canExecute:
- accessLevel = "rw"
- default:
- return nil, errors.New("only read only and read write access level are supported")
- }
-
- // Configure request body.
- opts := &sharesv2.GrantAccessOpts{
- AccessType: opt.Type,
- AccessTo: opt.GetAccessTo(),
- AccessLevel: accessLevel,
- }
-
- d.sharedFileSystemV2.Microversion = "2.7"
- shareACL, err := sharesv2.GrantAccess(d.sharedFileSystemV2, opt.Metadata[KManilaShareID], opts).Extract()
- if err != nil {
- log.Errorf("cannot grant access, err:%v, mailaShareID:%v, opts:%+v\n", err, opt.Metadata[KManilaShareID], opts)
- return nil, err
- }
-
- log.V(5).Infof("sharesv2.GrantAccess succeeded\n")
- respShareACL := model.FileShareAclSpec{
- BaseModel: &model.BaseModel{
- Id: opt.Id,
- },
- FileShareId: opt.FileshareId,
- Type: opt.Type,
- AccessCapability: opt.GetAccessCapability(),
- AccessTo: opt.GetAccessTo(),
- Description: opt.Description,
- Metadata: map[string]string{KManilaShareACLID: shareACL.ID},
- }
-
- log.V(5).Infof("function CreateFileShareAcl succeeded, respShareAcl:%+v\n", respShareACL)
- return &respShareACL, nil
-}
-
-// DeleteFileShareAcl implementation
-func (d *Driver) DeleteFileShareAcl(opt *pb.DeleteFileShareAclOpts) error {
- opts := &sharesv2.RevokeAccessOpts{
- AccessID: opt.Metadata[KManilaShareACLID],
- }
-
- d.sharedFileSystemV2.Microversion = "2.7"
- if err := sharesv2.RevokeAccess(d.sharedFileSystemV2, opt.Metadata[KManilaShareID], opts).ExtractErr(); err != nil {
- log.Error("cannot revoke access:", err)
- return err
- }
-
- log.V(5).Info("function DeleteFileShareAcl succeeded\n")
- return nil
-}
-
-// CreateFileShareSnapshot implementation
-func (d *Driver) CreateFileShareSnapshot(opt *pb.CreateFileShareSnapshotOpts) (*model.FileShareSnapshotSpec, error) {
- mailaShareID := opt.Metadata[KManilaShareID]
- opts := &snapshotsv2.CreateOpts{
- ShareID: mailaShareID,
- Name: opt.GetName(),
- Description: opt.GetDescription(),
- DisplayName: "",
- DisplayDescription: "",
- }
-
- snapshot, err := snapshotsv2.Create(d.sharedFileSystemV2, opts).Extract()
- if err != nil {
- log.Errorf("cannot create snapshot, err:%v, CreateOpts:%+v\n", err, opts)
- return nil, err
- }
-
- // Currently dock framework doesn't support sync data from storage system,
- // therefore, it's necessary to wait for the result of resource's creation.
- // Timout after 10s.
- timeout := time.After(10 * time.Second)
- ticker := time.NewTicker(300 * time.Millisecond)
- done := make(chan bool, 1)
- go func() {
- for {
- select {
- case <-ticker.C:
- tmpSnapshot, err := d.PullFileShareSnapshot(snapshot.ID)
- if err != nil {
- continue
- }
- if tmpSnapshot.Status != "creating" {
- snapshot.Status = tmpSnapshot.Status
- close(done)
- return
- }
- case <-timeout:
- close(done)
- return
- }
-
- }
- }()
- <-done
-
- respSnapshot := model.FileShareSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Description: opt.GetDescription(),
- SnapshotSize: int64(snapshot.Size),
- Status: snapshot.Status,
- Metadata: map[string]string{KManilaSnapID: snapshot.ID},
- }
-
- log.V(5).Infof("function CreateFileShareSnapshot succeeded, snapshot:%+v\n", respSnapshot)
- return &respSnapshot, nil
-}
-
-// DeleteFileShareSnapshot implementation
-func (d *Driver) DeleteFileShareSnapshot(opt *pb.DeleteFileShareSnapshotOpts) error {
- manilaSnapID := opt.Metadata[KManilaSnapID]
- if err := snapshotsv2.Delete(d.sharedFileSystemV2, manilaSnapID).ExtractErr(); err != nil {
- log.Error("cannot delete share:", err)
- return err
- }
-
- log.V(5).Info("function DeleteFileShareSnapshot succeeded\n")
- return nil
-}
-
-// PullFileShareSnapshot implementation
-func (d *Driver) PullFileShareSnapshot(ID string) (*model.FileShareSnapshotSpec, error) {
- snapshot, err := snapshotsv2.Get(d.sharedFileSystemV2, ID).Extract()
- if err != nil {
- log.Error("cannot get snapshot:", err)
- return nil, err
- }
-
- respShareSnap := model.FileShareSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: ID,
- },
- Name: snapshot.Name,
- Description: snapshot.Description,
- SnapshotSize: int64(snapshot.Size),
- Status: snapshot.Status,
- }
-
- log.V(5).Infof("function PullFileShareSnapshot succeeded, snapshot:%+v\n", respShareSnap)
- return &respShareSnap, nil
-}
-
-// GetProtoFromProfile implementation
-func (d *Driver) GetProtoFromProfile(prf string) (string, error) {
- if prf == "" {
- msg := "profile cannot be empty"
- return "", errors.New(msg)
- }
-
- log.V(5).Infof("file share profile is %s", prf)
- profile := &model.ProfileSpec{}
- err := json.Unmarshal([]byte(prf), profile)
- if err != nil {
- msg := fmt.Sprintf("unmarshal profile failed: %v", err)
- return "", errors.New(msg)
- }
-
- shareProto := profile.ProvisioningProperties.IOConnectivity.AccessProtocol
- if shareProto == "" {
- msg := "file share protocol cannot be empty"
- return "", errors.New(msg)
- }
-
- return shareProto, nil
-}
diff --git a/contrib/drivers/filesharedrivers/nfs/cli.go b/contrib/drivers/filesharedrivers/nfs/cli.go
deleted file mode 100644
index 4fc9f8a6c..000000000
--- a/contrib/drivers/filesharedrivers/nfs/cli.go
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package nfs
-
-import (
- "fmt"
- "net"
- "path"
- "strconv"
- "strings"
-
- "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/utils/exec"
-)
-
-type Cli struct {
- // Command executer
- BaseExecuter exec.Executer
- // Command Root executer
- RootExecuter exec.Executer
-}
-
-func NewCli() (*Cli, error) {
- return &Cli{
- BaseExecuter: exec.NewBaseExecuter(),
- RootExecuter: exec.NewRootExecuter(),
- }, nil
-}
-
-func (c *Cli) execute(cmd ...string) (string, error) {
- return c.RootExecuter.Run(cmd[0], cmd[1:]...)
-}
-
-func sizeStr(size int64) string {
- return fmt.Sprintf("%dg", size)
-}
-
-func (c *Cli) GetExportLocation(share_name, ip string) string {
- server := net.ParseIP(ip)
- if server == nil {
- glog.Errorf("this is not a valid ip:")
- return ""
- }
- var exportLocation string
- sharePath := path.Join(MountPath, share_name)
- exportLocation = fmt.Sprintf("%s:%s", server, strings.Replace(sharePath, "-", "_", -1))
- return exportLocation
-}
-
-func (c *Cli) CreateAccess(accessto, accesscapability, fname string) error {
- var accesstoAndMount string
- sharePath := path.Join(MountPath, fname)
- accesstoAndMount = fmt.Sprintf("%s:%s", accessto, strings.Replace(sharePath, "-", "_", -1))
- cmd := []string{
- "env", "LC_ALL=C",
- "exportfs",
- "-o",
- accesscapability,
- accesstoAndMount,
- }
- _, err := c.execute(cmd...)
-
- return err
-}
-
-func (c *Cli) DeleteAccess(accessto, fname string) error {
- var accesstoAndMount string
- sharePath := path.Join(MountPath, fname)
- accesstoAndMount = fmt.Sprintf("%s:%s", accessto, strings.Replace(sharePath, "-", "_", -1))
- cmd := []string{
- "env", "LC_ALL=C",
- "exportfs",
- "-u",
- accesstoAndMount,
- }
- _, err := c.execute(cmd...)
-
- return err
-}
-
-func (c *Cli) UnMount(dirName string) error {
- cmd := []string{
- "env", "LC_ALL=C",
- "umount",
- dirName,
- }
- _, err := c.execute(cmd...)
- if err != nil {
- if err.Error() == "exit status 32" {
- return nil
- }
- }
- return err
-}
-
-func (c *Cli) Mount(lvPath, dirName string) error {
- cmd := []string{
- "env", "LC_ALL=C",
- "mount",
- lvPath,
- dirName,
- }
- _, err := c.execute(cmd...)
- return err
-}
-
-func (c *Cli) CreateDirectory(dirName string) error {
- cmd := []string{
- "env", "LC_ALL=C",
- "mkdir",
- dirName,
- }
- _, err := c.execute(cmd...)
- return err
-}
-
-func (c *Cli) DeleteDirectory(dirName string) error {
- cmd := []string{
- "env", "LC_ALL=C",
- "rm", "-rf",
- dirName,
- }
- _, err := c.execute(cmd...)
- return err
-}
-
-func (c *Cli) SetPermission(dirName string) error {
- cmd := []string{
- "env", "LC_ALL=C",
- "chmod",
- "777",
- dirName,
- }
- _, err := c.execute(cmd...)
- return err
-}
-
-func (c *Cli) CreateFileShare(lvPath string) error {
- // create a filesytem
- cmd := []string{
- "env", "LC_ALL=C",
- "mke2fs",
- lvPath,
- }
- out := cmd
- glog.Infof(": CMD: %s, RESPONSE: %s", strings.Join(cmd, " "), out)
- _, err := c.execute(cmd...)
- return err
-}
-
-func (c *Cli) CreateFileShareFromSnapshot(lvPath string) error {
- cmd := []string{
- "env", "LC_ALL=C",
- "lvconvert",
- "--merge",
- lvPath,
- }
- _, err := c.execute(cmd...)
- if err != nil {
- // Deal with the error, probably pushing it up the call stack
- return err
- }
- return nil
-}
-
-func (c *Cli) CreateVolume(name string, vg string, size int64) error {
- cmd := []string{
- "env", "LC_ALL=C",
- "lvcreate",
- "-Z", "n",
- "-n", name,
- "-L", sizeStr(size),
- vg,
- }
- _, err := c.execute(cmd...)
- if err == nil {
- // Deal with the error, probably pushing it up the call stack
- return err
- }
-
- return err
-}
-
-func (c *Cli) Exists(name string) bool {
- cmd := []string{
- "env", "LC_ALL=C",
- "lvs",
- "--noheadings",
- "-o", "name",
- }
- out, err := c.execute(cmd...)
- if err != nil {
- return false
- }
- for _, field := range strings.Fields(out) {
- if field == name {
- return true
- }
- }
- return false
-}
-
-func (c *Cli) LvIsActivate(name, vg string) bool {
- cmd := []string{
- "env", "LC_ALL=C",
- "lvdisplay",
- "--noheading",
- "-C", "-o",
- "Attr", path.Join(vg, name),
- }
- out, err := c.execute(cmd...)
- if err != nil {
- glog.Error("failed to display logic volume:", err)
- return false
- }
- out = strings.TrimSpace(out)
- return out[4] == 'a'
-}
-
-// delete volume or snapshot
-func (c *Cli) Delete(name, lvpath string) error {
- // LV removal seems to be a race with other writers so we enable retry deactivation
- lvmConfig := "activation { retry_deactivation = 1} "
- cmd := []string{
- "env", "LC_ALL=C",
- "lvremove",
- "--config", lvmConfig,
- "-f",
- lvpath,
- }
-
- if out, err := c.execute(cmd...); err != nil {
- glog.Infof("error reported running lvremove: CMD: %s, RESPONSE: %s",
- strings.Join(cmd, " "), out)
- // run_udevadm_settle
- c.execute("udevadm", "settle")
-
- // The previous failing lvremove -f might leave behind
- // suspended devices; when lvmetad is not available, any
- // further lvm command will block forever.
- // Therefore we need to skip suspended devices on retry.
- lvmConfig += "devices { ignore_suspended_devices = 1}"
- cmd := []string{
- "env", "LC_ALL=C",
- "lvremove",
- "--config", lvmConfig,
- "-f",
- lvpath,
- }
- if _, err := c.execute(cmd...); err != nil {
- return err
- }
- glog.Infof("successfully deleted volume: %s after udev settle.", name)
- }
- return nil
-}
-
-type VolumeGroup struct {
- Name string
- TotalCapacity int64
- FreeCapacity int64
- UUID string
-}
-
-func (c *Cli) ListVgs() (*[]VolumeGroup, error) {
- cmd := []string{
- "env", "LC_ALL=C",
- "vgs",
- "--noheadings",
- "--nosuffix",
- "--unit=g",
- "-o", "name,size,free,uuid",
- }
- out, err := c.execute(cmd...)
- if err != nil {
- return nil, err
- }
- lines := strings.Split(out, "\n")
- var vgs []VolumeGroup
- for _, line := range lines {
- if len(line) == 0 {
- continue
- }
- fields := strings.Fields(line)
- total, _ := strconv.ParseFloat(fields[1], 64)
- free, _ := strconv.ParseFloat(fields[2], 64)
- vg := VolumeGroup{
- Name: fields[0],
- TotalCapacity: int64(total),
- FreeCapacity: int64(free),
- UUID: fields[3],
- }
- vgs = append(vgs, vg)
- }
- return &vgs, nil
-}
-
-func (c *Cli) CreateLvSnapshot(name, sourceLvName, vg string, size int64) error {
- cmd := []string{
- "env", "LC_ALL=C",
- "lvcreate",
- "-n", name,
- "-L", sizeStr(size),
- "-p", "r",
- "-s", path.Join("/dev", vg, sourceLvName),
- }
- fmt.Println("cmd==:", cmd)
- if _, err := c.execute(cmd...); err != nil {
- return err
- }
- return nil
-}
-
-// delete volume or snapshot
-func (c *Cli) DeleteFileShareSnapshots(name, vg string) error {
- // LV removal seems to be a race with other writers so we enable retry deactivation
- lvmConfig := "activation { retry_deactivation = 1} "
- cmd := []string{
- "env", "LC_ALL=C",
- "lvremove",
- "--config", lvmConfig,
- "-f",
- path.Join(vg, name),
- }
-
- if out, err := c.execute(cmd...); err != nil {
- glog.Infof("Error reported running lvremove: CMD: %s, RESPONSE: %s",
- strings.Join(cmd, " "), out)
- // run_udevadm_settle
- c.execute("udevadm", "settle")
-
- lvmConfig += "devices { ignore_suspended_devices = 1}"
- cmd := []string{
- "env", "LC_ALL=C",
- "lvremove",
- "--config", lvmConfig,
- "-f",
- path.Join(vg, name),
- }
- if _, err := c.execute(cmd...); err != nil {
- return err
- }
- glog.Infof("Successfully deleted fileshare snapshot: %s after udev settle.", name)
- }
- return nil
-}
diff --git a/contrib/drivers/filesharedrivers/nfs/nfs.go b/contrib/drivers/filesharedrivers/nfs/nfs.go
deleted file mode 100644
index b763b4465..000000000
--- a/contrib/drivers/filesharedrivers/nfs/nfs.go
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package nfs
-
-import (
- "errors"
- "path"
- "strings"
-
- log "github.com/golang/glog"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/opensds/opensds/pkg/utils/config"
- uuid "github.com/satori/go.uuid"
-)
-
-const (
- defaultTgtConfDir = "/etc/tgt/conf.d"
- defaultTgtBindIp = "127.0.0.1"
- defaultConfPath = "/etc/opensds/driver/nfs.yaml"
- FileSharePrefix = "fileshare-"
- snapshotPrefix = "_snapshot-"
- blocksize = 4096
- sizeShiftBit = 30
- opensdsnvmepool = "opensds-nvmegroup"
- nvmeofAccess = "nvmeof"
- iscsiAccess = "iscsi"
-)
-
-const (
- KLvPath = "lvPath"
- KLvsPath = "lvsPath"
- KFileshareName = "nfsFileshareName"
- KFileshareID = "nfsFileshareID"
- KFileshareSnapName = "snapshotName"
- KFileshareSnapID = "snapshotID"
- AccessLevelRo = "ro"
- AccessLevelRw = "rw"
- MountPath = "/mnt"
-)
-
-type NFSConfig struct {
- TgtBindIp string `yaml:"tgtBindIp"`
- TgtConfDir string `yaml:"tgtConfDir"`
- EnableChapAuth bool `yaml:"enableChapAuth"`
- Pool map[string]PoolProperties `yaml:"pool,flow"`
-}
-
-type Driver struct {
- conf *NFSConfig
- cli *Cli
-}
-
-func (d *Driver) Setup() error {
- // Read nfs config file
- d.conf = &NFSConfig{TgtBindIp: defaultTgtBindIp, TgtConfDir: defaultTgtConfDir}
- p := config.CONF.OsdsDock.Backends.NFS.ConfigPath
- if "" == p {
- p = defaultConfPath
- }
- if _, err := Parse(d.conf, p); err != nil {
- return err
- }
- cli, err := NewCli()
- if err != nil {
- return err
- }
- d.cli = cli
-
- return nil
-}
-
-func (*Driver) Unset() error { return nil }
-
-func (d *Driver) CreateFileShareAcl(opt *pb.CreateFileShareAclOpts) (*model.FileShareAclSpec, error) {
- var access string
- // Get accessto list
- accessTo := opt.GetAccessTo()
- // get accessCapability list
- accessCapability := opt.GetAccessCapability()
- // get fileshare name
- fname := opt.Name
-
- permissions := []string{"write"}
- WriteAccess := false
-
- for _, value := range accessCapability {
- value = strings.ToLower(value)
- if utils.Contains(permissions, value) {
- WriteAccess = true
- }
- if value == "Execute" {
- log.Error("invalid permission:", value)
- return nil, nil
- }
- }
- if WriteAccess {
- access = AccessLevelRw
- } else {
- access = AccessLevelRo
- }
-
- if err := d.cli.CreateAccess(accessTo, access, fname); err != nil {
- log.Errorf("grant access %s to %s failed %v", accessTo, fname, err)
- return nil, err
- }
-
- shareAccess := &model.FileShareAclSpec{
- BaseModel: &model.BaseModel{
- Id: opt.Id,
- },
- FileShareId: opt.FileshareId,
- Type: opt.Type,
- AccessCapability: accessCapability,
- AccessTo: accessTo,
- Metadata: map[string]string{},
- }
- return shareAccess, nil
-}
-
-func (d *Driver) DeleteFileShareAcl(opt *pb.DeleteFileShareAclOpts) error {
- // Get accessto list
- accessTo := opt.GetAccessTo()
- // get fileshare name
- fname := opt.Name
-
- if err := d.cli.DeleteAccess(accessTo, fname); err != nil {
- log.Error("cannot revoke access:", err)
- return err
- }
-
- return nil
-}
-
-func (d *Driver) CreateFileShare(opt *pb.CreateFileShareOpts) (*model.FileShareSpec, error) {
-
- var fshare *model.FileShareSpec
- //get the server ip for configuration
- var server = d.conf.TgtBindIp
- //get fileshare name
- var name = opt.GetName()
- //get volume group
- var vg = opt.GetPoolName()
- // Crete a directory to mount
- var dirName = path.Join(MountPath, name)
- // create a fileshare path
- var lvPath = path.Join("/dev", vg, name)
-
- if err := d.cli.CreateDirectory(dirName); err != nil {
- log.Error("failed to create a directory:", err)
- return nil, err
- }
-
- if opt.SnapshotId != "" {
- // User requested for creating fileshare using existing snapshot
- //get fileshare name
- var existingFsName = opt.GetMetadata()[KFileshareName]
-
- // get existingfileshare snap logical path
- //get volume group
- var vg = opt.GetPoolName()
- // create a fileshare device path
- var lvPathForSnap = path.Join("/dev", vg, opt.SnapshotName)
- // create a existing fileshare device path
- var lvPathExistingPath = path.Join("/dev", vg, existingFsName)
- // get directory where fileshare mounted
- var olddirName = path.Join(MountPath, existingFsName)
- // umount the volume to directory
- if err := d.cli.UnMount(olddirName); err != nil {
- log.Error("failed to unmount a directory:", err)
- return nil, err
- }
-
- if err := d.cli.CreateFileShareFromSnapshot(lvPathForSnap); err != nil {
- log.Error("failed to create filesystem from given snapshot:", err)
- return nil, err
- }
- // mount the volume to directory
- if err := d.cli.Mount(lvPathExistingPath, dirName); err != nil {
- log.Error("failed to mount a directory:", err)
- return nil, err
- }
- } else {
- if err := d.cli.CreateVolume(name, vg, opt.GetSize()); err != nil {
- return nil, err
- }
- // remove created volume if got error
- defer func() {
- // using return value as the error flag
- if fshare == nil {
- if err := d.cli.Delete(name, vg); err != nil {
- log.Error("failed to remove volume fileshare:", err)
- }
- }
- }()
-
- // Crete fileshare on this path
- if err := d.cli.CreateFileShare(lvPath); err != nil {
- log.Error("failed to create filesystem logic volume:", err)
- return nil, err
- }
- // mount the volume to directory
- if err := d.cli.Mount(lvPath, dirName); err != nil {
- log.Error("failed to mount a directory:", err)
- return nil, err
- }
- }
- // Set permission to directory
- if err := d.cli.SetPermission(dirName); err != nil {
- log.Error("failed to set permission:", err)
- return nil, err
- }
- // get export location of fileshare
- var location []string
- location = []string{d.cli.GetExportLocation(name, server)}
- if len(location) == 0 {
- errMsg := errors.New("failed to get exportlocation: export location is empty!")
- log.Error(errMsg)
- return nil, errMsg
- }
-
- fshare = &model.FileShareSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- AvailabilityZone: opt.GetAvailabilityZone(),
- PoolId: opt.GetPoolId(),
- Protocols: []string{NFSProtocol},
- ExportLocations: location,
- Metadata: map[string]string{
- KFileshareName: name,
- KFileshareSnapName: "",
- KFileshareID: opt.GetId(),
- KLvPath: lvPath,
- },
- }
- return fshare, nil
-}
-
-// ListPools
-func (d *Driver) ListPools() ([]*model.StoragePoolSpec, error) {
- vgs, err := d.cli.ListVgs()
- if err != nil {
- return nil, err
- }
- var pols []*model.StoragePoolSpec
- for _, vg := range *vgs {
- if _, ok := d.conf.Pool[vg.Name]; !ok {
- continue
- }
-
- pol := &model.StoragePoolSpec{
- BaseModel: &model.BaseModel{
- Id: uuid.NewV5(uuid.NamespaceOID, vg.UUID).String(),
- },
- Name: vg.Name,
- TotalCapacity: vg.TotalCapacity,
- FreeCapacity: vg.FreeCapacity,
- StorageType: d.conf.Pool[vg.Name].StorageType,
- Extras: d.conf.Pool[vg.Name].Extras,
- AvailabilityZone: d.conf.Pool[vg.Name].AvailabilityZone,
- }
- if pol.AvailabilityZone == "" {
- pol.AvailabilityZone = "default"
- }
- pols = append(pols, pol)
- }
- return pols, nil
-}
-
-// delete fileshare from device
-func (d *Driver) DeleteFileShare(opt *pb.DeleteFileShareOpts) error {
- // get fileshare name to be deleted
- fname := opt.GetMetadata()[KFileshareName]
- if !d.cli.Exists(fname) {
- log.Warningf("fileshare(%s) does not exist, nothing to remove", fname)
- return nil
- }
- // get fileshare path
- lvPath := opt.GetMetadata()[KLvPath]
- // get directory where fileshare mounted
- var dirName = path.Join(MountPath, fname)
-
- // umount the volume to directory
- if err := d.cli.UnMount(dirName); err != nil {
- log.Error("failed to unmount the directory:", err)
- return err
- }
- // delete the actual fileshare from device
- if err := d.cli.Delete(fname, lvPath); err != nil {
- log.Error("failed to remove logic volume:", err)
- return err
- }
- // Delete the directory
- if err := d.cli.DeleteDirectory(dirName); err != nil {
- log.Error("failed to delete the directory:", err)
- return err
- }
- return nil
-}
-
-// CreateFileShareSnapshot
-func (d *Driver) CreateFileShareSnapshot(opt *pb.CreateFileShareSnapshotOpts) (*model.FileShareSnapshotSpec, error) {
- lvPath, ok := opt.GetMetadata()[KLvPath]
- if !ok {
- err := errors.New("can't find 'lvPath' in snapshot metadata")
- log.Error(err)
- return nil, err
- }
- snapName := opt.GetName()
- fields := strings.Split(lvPath, "/")
-
- vg, sourceLvName := fields[2], fields[3]
- if err := d.cli.CreateLvSnapshot(snapName, sourceLvName, vg, opt.GetSize()); err != nil {
- log.Error("failed to create logic volume snapshot:", err)
- return nil, err
- }
-
- return &model.FileShareSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- SnapshotSize: opt.GetSize(),
- Description: opt.GetDescription(),
- Metadata: map[string]string{
- KFileshareSnapName: snapName,
- KFileshareSnapID: opt.GetId(),
- KLvPath: lvPath,
- },
- }, nil
-}
-
-// DeleteFileShareSnapshot
-func (d *Driver) DeleteFileShareSnapshot(opt *pb.DeleteFileShareSnapshotOpts) error {
- lvsPath, ok := opt.GetMetadata()[KLvPath]
- snapName := opt.GetMetadata()[KFileshareSnapName]
- if !ok {
- err := errors.New("can't find 'lvsPath' in snapshot metadata, ingnore it!")
- log.Error(err)
- return nil
- }
- fields := strings.Split(lvsPath, "/")
- vg := fields[2]
- if !d.cli.Exists(snapName) {
- log.Warningf("Snapshot(%s) does not exist, nothing to remove", snapName)
- return nil
- }
-
- if err := d.cli.DeleteFileShareSnapshots(snapName, vg); err != nil {
- log.Error("failed to remove logic volume:", err)
- return err
- }
- return nil
-}
diff --git a/contrib/drivers/filesharedrivers/nfs/nfs_test.go b/contrib/drivers/filesharedrivers/nfs/nfs_test.go
deleted file mode 100644
index 080f23570..000000000
--- a/contrib/drivers/filesharedrivers/nfs/nfs_test.go
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package nfs
-
-import (
- "fmt"
- "reflect"
- "testing"
-
- //"github.com/opensds/opensds/contrib/drivers/filesharedrivers/nfs"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils/config"
- "github.com/opensds/opensds/pkg/utils/exec"
-)
-
-var fp = map[string]PoolProperties{
- "opensds-files-default": {
- StorageType: "file",
- AvailabilityZone: "default",
- MultiAttach: true,
- Extras: model.StoragePoolExtraSpec{
- DataStorage: model.DataStorageLoS{
- ProvisioningPolicy: "Thin",
- Compression: false,
- Deduplication: false,
- StorageAccessCapability: []string{"Read", "Write", "Execute"},
- },
- IOConnectivity: model.IOConnectivityLoS{
- AccessProtocol: "nfs",
- MaxIOPS: 7000000,
- MaxBWS: 600,
- MinIOPS: 1000000,
- MinBWS: 100,
- Latency: 100,
- },
- Advanced: map[string]interface{}{
- "diskType": "SSD",
- "latency": "5ms",
- },
- },
- },
-}
-
-func TestSetup(t *testing.T) {
- var d = &Driver{}
- config.CONF.OsdsDock.Backends.NFS.ConfigPath = "testdata/nfs.yaml"
- var expectedDriver = &Driver{
- conf: &NFSConfig{
- Pool: fp,
- TgtBindIp: "11.242.178.20",
- TgtConfDir: "/etc/tgt/conf.d",
- EnableChapAuth: false,
- },
- }
-
- if err := d.Setup(); err != nil {
- t.Errorf("Setup nfs driver failed: %+v\n", err)
- }
- if !reflect.DeepEqual(d.conf, expectedDriver.conf) {
- t.Errorf("Expected %+v, got %+v", expectedDriver.conf, d.conf)
- }
-}
-
-type FakeResp struct {
- out string
- err error
-}
-
-func NewFakeExecuter(respMap map[string]*FakeResp) exec.Executer {
- return &FakeExecuter{RespMap: respMap}
-}
-
-type FakeExecuter struct {
- RespMap map[string]*FakeResp
-}
-
-func (f *FakeExecuter) Run(name string, args ...string) (string, error) {
- var cmd = name
- if name == "env" {
- cmd = args[1]
- }
- v, ok := f.RespMap[cmd]
- if !ok {
- return "", fmt.Errorf("can not find specified op: %s", args[1])
- }
- return v.out, v.err
-}
-
-func TestCreateFileShare(t *testing.T) {
- var fd = &Driver{}
- config.CONF.OsdsDock.Backends.NFS.ConfigPath = "testdata/nfs.yaml"
- fd.Setup()
-
- respMap := map[string]*FakeResp{
- "mkdir": {"", nil},
- "mke2fs": {"", nil},
- "mount": {"", nil},
- "chmod": {"", nil},
- "lvconvert": {"", nil},
- "lvcreate": {"", nil},
- }
- fd.cli.RootExecuter = NewFakeExecuter(respMap)
- fd.cli.BaseExecuter = NewFakeExecuter(respMap)
-
- opt := &pb.CreateFileShareOpts{
- Id: "e1bb066c-5ce7-46eb-9336-25508cee9f71",
- Name: "test001",
- Description: "fileshare for testing",
- Size: int64(1),
- PoolName: "vg001",
- }
- var expected = &model.FileShareSpec{
- BaseModel: &model.BaseModel{Id: "e1bb066c-5ce7-46eb-9336-25508cee9f71"},
- Name: "test001",
- Description: "fileshare for testing",
- Size: int64(1),
- Protocols: []string{"nfs"},
- ExportLocations: []string{"11.242.178.20:/mnt/test001"},
- Metadata: map[string]string{
- "lvPath": "/dev/vg001/test001",
- "nfsFileshareID": "e1bb066c-5ce7-46eb-9336-25508cee9f71",
- "nfsFileshareName": "test001",
- "snapshotName": "",
- },
- }
- fileshare, err := fd.CreateFileShare(opt)
- if err != nil {
- t.Error("Failed to create fileshare:", err)
- }
- if !reflect.DeepEqual(fileshare, expected) {
- t.Errorf("Expected %+v, got %+v\n", expected, fileshare)
- }
-}
-
-func TestListPools(t *testing.T) {
- var fd = &Driver{}
- config.CONF.OsdsDock.Backends.NFS.ConfigPath = "testdata/nfs.yaml"
- fd.Setup()
-
- var vgsResp = `opensds-files-default 20.00 20.00 WSpJ3r-JYVF-DYNq-1rCe-5I6j-Zb3d-8Ub0Hg
- opensds-volumes-default 20.00 20.00 t7mLWW-AeCf-LtuF-7K8p-R4xA-QC5x-61qx3H`
- respMap := map[string]*FakeResp{
- "vgs": {vgsResp, nil},
- }
- fd.cli.RootExecuter = NewFakeExecuter(respMap)
- fd.cli.BaseExecuter = NewFakeExecuter(respMap)
-
- var expected = []*model.StoragePoolSpec{
- {
- BaseModel: &model.BaseModel{},
- Name: "opensds-files-default",
- TotalCapacity: int64(20),
- FreeCapacity: int64(20),
- AvailabilityZone: "default",
- StorageType: "file",
- MultiAttach: false,
- Extras: model.StoragePoolExtraSpec{
- DataStorage: model.DataStorageLoS{
- ProvisioningPolicy: "Thin",
- Compression: false,
- Deduplication: false,
- StorageAccessCapability: []string{"Read", "Write", "Execute"},
- },
- IOConnectivity: model.IOConnectivityLoS{
- AccessProtocol: "nfs",
- MaxIOPS: 7000000,
- MaxBWS: 600,
- MinIOPS: 1000000,
- MinBWS: 100,
- Latency: 100,
- },
- Advanced: map[string]interface{}{
- "diskType": "SSD",
- "latency": "5ms",
- },
- },
- },
- }
-
- pols, err := fd.ListPools()
- if err != nil {
- t.Error("Failed to list pools:", err)
- }
- for i := range pols {
- pols[i].Id = ""
- }
- if !reflect.DeepEqual(pols, expected) {
- t.Errorf("Expected %+v, got %+v\n", expected[0], pols[0])
- }
-}
diff --git a/contrib/drivers/filesharedrivers/nfs/testdata/nfs.yaml b/contrib/drivers/filesharedrivers/nfs/testdata/nfs.yaml
deleted file mode 100644
index cb98cb35e..000000000
--- a/contrib/drivers/filesharedrivers/nfs/testdata/nfs.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-tgtBindIp: 11.242.178.20
-tgtConfDir: /etc/tgt/conf.d
-pool:
- opensds-files-default:
- diskType: NL-SAS
- availabilityZone: default
- multiAttach: true
- storageType: file
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- storageAccessCapability:
- - Read
- - Write
- - Execute
- ioConnectivity:
- accessProtocol: nfs
- maxIOPS: 7000000
- maxBWS: 600
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 5ms
diff --git a/contrib/drivers/filesharedrivers/oceanstor/cifs.go b/contrib/drivers/filesharedrivers/oceanstor/cifs.go
deleted file mode 100644
index fcbea9e71..000000000
--- a/contrib/drivers/filesharedrivers/oceanstor/cifs.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oceanstor
-
-import (
- "fmt"
- "strings"
-)
-
-type CIFS struct {
- *Client
-}
-
-func (c *CIFS) getShareID(share interface{}) string {
- return share.(*CIFSShareData).ID
-}
-
-func (c *CIFS) getShareByID(shareID string) (interface{}, error) {
- url := "/CIFSHARE/" + shareID
- resp, err := c.request(url, "GET", nil)
- if err != nil {
- return nil, err
- }
-
- var cifsShare CIFSShare
- if err := handleReponse(resp, &cifsShare); err != nil {
- return nil, err
- }
-
- if cifsShare.Data.ID == "" {
- return nil, nil
- }
-
- return &cifsShare.Data, nil
-}
-
-func (c *CIFS) createShare(shareName, fsId string) (interface{}, error) {
- sharePath := getSharePath(shareName)
- data := map[string]string{
- "SHAREPATH": sharePath,
- "DESCRIPTION": "",
- "ABEENABLE": "false",
- "ENABLENOTIFY": "true",
- "ENABLEOPLOCK": "true",
- "NAME": strings.Replace(shareName, "-", "_", -1),
- "FSID": fsId,
- "TENANCYID": "0",
- }
-
- url := "/CIFSHARE"
-
- resp, err := c.request(url, "POST", data)
- if err != nil {
- return nil, err
- }
-
- var cifsShare CIFSShare
-
- if err := handleReponse(resp, &cifsShare); err != nil {
- return nil, err
- }
-
- return &cifsShare.Data, nil
-}
-
-func (c *CIFS) getShare(shareName string) (interface{}, error) {
- url := fmt.Sprintf("/CIFSHARE?filter=NAME:%s&range=[0-100]", strings.Replace(shareName, "-", "_", -1))
- resp, err := c.request(url, "GET", nil)
- if err != nil {
- return nil, err
- }
-
- var cifsShareList CIFSShareList
- if err := handleReponse(resp, &cifsShareList); err != nil {
- return nil, err
- }
-
- if len(cifsShareList.Data) > 0 {
- return &cifsShareList.Data[0], nil
- }
-
- return nil, nil
-}
-
-func (c *CIFS) listShares() ([]CIFSShareData, error) {
- url := "/CIFSHARE"
- resp, err := c.request(url, "GET", nil)
- if err != nil {
- return nil, err
- }
-
- var cifsShareList CIFSShareList
- if err := handleReponse(resp, &cifsShareList); err != nil {
- return nil, err
- }
-
- return cifsShareList.Data, nil
-}
-
-func (c *CIFS) deleteShare(shareID string) error {
- url := "/cifshare/" + shareID
- resp, err := c.request(url, "DELETE", nil)
- if err != nil {
- return err
- }
-
- var errDelete DeleteError
- if err := handleReponse(resp, &errDelete); err != nil {
- return err
- }
-
- return nil
-}
-
-func (c *CIFS) allowAccess(shareID, accessTo, accessLevel string) (interface{}, error) {
-
- domainType := map[string]string{"local": "2", "ad": "0"}
-
- sendRest := func(accessTo, domain string) (*CIFSShareClientData, error) {
- url := "/CIFS_SHARE_AUTH_CLIENT"
- data := map[string]string{
- "NAME": accessTo,
- "PARENTID": shareID,
- "PERMISSION": accessLevel,
- "DOMAINTYPE": domain,
- }
-
- resp, err := c.request(url, "POST", data)
- if err != nil {
- return nil, err
- }
-
- var cifsClient CIFSShareClient
- if err := handleReponse(resp, &cifsClient); err != nil {
- return nil, err
- }
-
- return &cifsClient.Data, nil
- }
-
- var data *CIFSShareClientData
- var errRest error
-
- if !strings.Contains(accessTo, "\\\\") {
- // First, try to add user access
- if data, errRest = sendRest(accessTo, domainType["local"]); errRest != nil {
- // Second, if add user access failed, try to add group access.
- if data, errRest = sendRest("@"+accessTo, domainType["local"]); errRest != nil {
- return nil, errRest
- }
- }
-
- } else {
- // If add domain user access failed, try to add domain group access.
- if data, errRest = sendRest(accessTo, domainType["ad"]); errRest != nil {
- if data, errRest = sendRest("@"+accessTo, domainType["ad"]); errRest != nil {
- return nil, errRest
- }
- }
- }
-
- return data, nil
-}
-
-// getLocation
-func (c *CIFS) getLocation(sharePath, ipAddr string) string {
- path := strings.Replace(sharePath, "-", "_", -1)
- return fmt.Sprintf("\\\\%s\\%s", ipAddr, path)
-}
-
-func (c *CIFS) getAccessLevel(accessLevel string) string {
- if accessLevel == AccessLevelRW {
- return AccessCIFSFullControl
- }
- return AccessCIFSRo
-}
diff --git a/contrib/drivers/filesharedrivers/oceanstor/constants.go b/contrib/drivers/filesharedrivers/oceanstor/constants.go
deleted file mode 100644
index 92f75bb43..000000000
--- a/contrib/drivers/filesharedrivers/oceanstor/constants.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oceanstor
-
-const (
- DefaultConfPath = "/etc/opensds/driver/oceanstor_fileshare.yaml"
- PwdExpired = 3
- PwdReset = 4
- NFSProto = "nfs"
- CIFSProto = "cifs"
- UnitGi = 1024 * 1024 * 1024
- defaultAZ = "default"
- defaultFileSystem = "opensds_file_system"
- StatusFSHealth = "1"
- StatusFSRunning = "27"
- AccessLevelRW = "rw"
- AccessLevelRO = "ro"
- AccessNFSRw = "1"
- AccessNFSRo = "0"
- AccessCIFSRo = "0"
- AccessCIFSFullControl = "1"
- MaxRetry = 3
- FileShareName = "fileshareName"
- FileShareID = "shareId"
- NamePrefix = "opensds"
- FileShareSnapshotID = "fileshareSnapId"
- AccessTypeUser = "user"
- AccessTypeIp = "ip"
- AccessLevelRead = "read"
- AccessLevelWrite = "write"
- AccessLevelExecute = "execute"
-)
diff --git a/contrib/drivers/filesharedrivers/oceanstor/model.go b/contrib/drivers/filesharedrivers/oceanstor/model.go
deleted file mode 100644
index 7024860ca..000000000
--- a/contrib/drivers/filesharedrivers/oceanstor/model.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oceanstor
-
-import (
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
-)
-
-type Client struct {
- username string
- password string
- urlPrefix string
- deviceId string
- iBaseToken string
- cookie string
- header map[string]string
-}
-
-type Driver struct {
- *Config
- *Client
-}
-
-type AuthOptions struct {
- Username string `yaml:"username"`
- Password string `yaml:"password"`
- Endpoint string `yaml:"endpoints"`
- PwdEncrypter string `yaml:"PwdEncrypter,omitempty"`
- EnableEncrypted bool `yaml:"EnableEncrypted,omitempty"`
-}
-
-type Config struct {
- AuthOptions `yaml:"authOptions"`
- Pool map[string]PoolProperties `yaml:"pool,flow"`
-}
-
-type Error struct {
- Code int `json:"code"`
- Description string `json:"description"`
-}
-
-type DeleteError struct {
- Error `json:"error"`
-}
-
-type AuthData struct {
- AccountState int `json:"accountstate"`
- DeviceId string `json:"deviceid"`
- IBaseToken string `json:"iBaseToken"`
- LastLoginIp string `json:"lastloginip"`
- LastLoginTime int `json:"lastlogintime"`
- Level int `json:"level"`
- PwdChanGeTime int `json:"pwdchangetime"`
- UserGroup string `json:"usergroup"`
- UserId string `json:"userid"`
- UserName string `json:"username"`
- UserScope string `json:"userscope"`
-}
-
-type Auth struct {
- Data AuthData `json:"data"`
- Error `json:"error"`
-}
-
-type StoragePool struct {
- Description string `json:"DESCRIPTION"`
- Id string `json:"ID"`
- Name string `json:"NAME"`
- UserFreeCapacity string `json:"USERFREECAPACITY"`
- UserTotalCapacity string `json:"USERTOTALCAPACITY"`
-}
-
-type StoragePoolList struct {
- Data []StoragePool `json:"data"`
- Error `json:"error"`
-}
-
-type FileSystem struct {
- Data FileSystemData `json:"data"`
- Error `json:"error"`
-}
-
-type FileSystemList struct {
- Data []FileSystemData `json:"data"`
- Error `json:"error"`
-}
-
-type FileSystemData struct {
- HealthStatus string `json:"HEALTHSTATUS"`
- RunningStatus string `json:"RUNNINGSTATUS"`
- ID string `json:"ID"`
- Capacity string `json:"CAPACITY"`
- PoolName string `json:"POOLNAME"`
- AllocType string `json:"ALLOCTYPE"`
- Name string `json:"NAME"`
-}
-
-type NFSShare struct {
- Data NFSShareData `json:"data"`
- Error `json:"error"`
-}
-
-type NFSShareData struct {
- Description string `json:"DESCRIPTION"`
- FSID string `json:"FSID"`
- ID string `json:"ID"`
- SharePath string `json:"SHAREPATH"`
- LockPolicy string `json:"LOCKPOLICY"`
- Name string `json:"NAME"`
- CharacterEncoding string `json:"CHARACTERENCODING"`
-}
-
-type CIFSShare struct {
- Data CIFSShareData `json:"data"`
- Error `json:"error"`
-}
-
-type CIFSShareData struct {
- Description string `json:"DESCRIPTION"`
- FSID string `json:"FSID"`
- ID string `json:"ID"`
- SharePath string `json:"SHAREPATH"`
- Name string `json:"NAME"`
- AbeEnable string `json:"ABEENABLE"`
- EnableCA string `json:"ENABLECA"`
- EnableFileExtensionFilter string `json:"ENABLEFILEEXTENSIONFILTER"`
- EnableNotify string `json:"ENABLENOTIFY"`
- EnableOpLock string `json:"ENABLEOPLOCK"`
- EnableIPControl string `json:"ENABLEIPCONTROL"`
- OfflineFileMode string `json:"OFFLINEFILEMODE"`
- ApplyDefaultACL string `json:"APPLYDEFAULTACL"`
-}
-
-type NFSShareList struct {
- Data []NFSShareData `json:"data"`
- Error `json:"error"`
-}
-
-type CIFSShareList struct {
- Data []CIFSShareData `json:"data"`
- Error `json:"error"`
-}
-
-// FSSnapshot file system snapshot ...
-type FSSnapshotData struct {
- Type int `json:"TYPE"`
- ID string `json:"ID"`
- Name string `json:"NAME"`
- ConsumeCapacity string `json:"CONSUMEDCAPACITY"`
- HealthStatus string `json:"HEALTHSTATUS"`
- ParentID string `json:"PARENTID"`
- ParentName string `json:"PARENTNAME"`
- ParentType int `json:"PARENTTYPE"`
- Capacity string `json:"USERCAPACITY"`
-}
-
-type FSSnapshot struct {
- Data FSSnapshotData `json:"data"`
- Error `json:"error"`
-}
-
-type FSSnapshotList struct {
- Data []FSSnapshotData `json:"data"`
- Error `json:"error"`
-}
-
-// LogicalPortList logical portal ...
-type LogicalPortList struct {
- Data []LogicalPortData `json:"data"`
- Error `json:"error"`
-}
-
-type LogicalPortData struct {
- ID string `json:"ID"`
- IpAddr string `json:"IPV4ADDR"`
-}
-
-type ShareAuthClientData struct {
- ID string `json:"ID"`
- Name string `json:"NAME"`
- accessVal string `json:"ACCESSVAL"`
-}
-
-type ShareAuthClientList struct {
- Data []ShareAuthClientData `json:"data"`
- Error `json:"error"`
-}
-
-type NFSShareClient struct {
- Error `json:"error"`
-}
-
-type CIFSShareClient struct {
- Data CIFSShareClientData `json:"data"`
- Error `json:"error"`
-}
-
-type CIFSShareClientData struct {
- DomainType string `json:"DOMAINTYPE"`
- ID string `json:"ID"`
- Name string `json:"NAME"`
- Permission string `json:"PERMISSION"`
-}
-
-type shareAuthClientCount struct {
- Data Count `json:"data"`
- Error `json:"error"`
-}
-
-type Count struct {
- Counter string `json:"COUNT"`
-}
diff --git a/contrib/drivers/filesharedrivers/oceanstor/nfs.go b/contrib/drivers/filesharedrivers/oceanstor/nfs.go
deleted file mode 100644
index 0f57a71bd..000000000
--- a/contrib/drivers/filesharedrivers/oceanstor/nfs.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oceanstor
-
-import (
- "fmt"
- "strings"
-)
-
-type NFS struct {
- *Client
-}
-
-func (c *NFS) getShareID(share interface{}) string {
- return share.(*NFSShareData).ID
-}
-
-func (c *NFS) createShare(shareName, fsID string) (interface{}, error) {
- sharePath := getSharePath(shareName)
- data := map[string]string{
- "DESCRIPTION": "",
- "FSID": fsID,
- "SHAREPATH": sharePath,
- }
-
- url := "/NFSHARE"
-
- resp, err := c.request(url, "POST", data)
- if err != nil {
- return nil, fmt.Errorf("create nfs share %s failed: %v", sharePath, err)
- }
-
- var nfsShare NFSShare
- if err := handleReponse(resp, &nfsShare); err != nil {
- return nil, fmt.Errorf("create nfs share %s failed: %v", sharePath, err)
- }
-
- return &nfsShare.Data, nil
-}
-
-func (c *NFS) getShare(shareName string) (interface{}, error) {
- url := fmt.Sprintf("/NFSHARE?filter=SHAREPATH::%s&range=[0-100]", getSharePath(shareName))
- resp, err := c.request(url, "GET", nil)
- if err != nil {
- return nil, err
- }
-
- var nfsShareList NFSShareList
- if err := handleReponse(resp, &nfsShareList); err != nil {
- return nil, err
- }
-
- if len(nfsShareList.Data) > 0 {
- return &nfsShareList.Data[0], nil
- }
-
- return nil, nil
-}
-
-func (c *NFS) deleteShare(shareID string) error {
- url := "/nfshare/" + shareID
- resp, err := c.request(url, "DELETE", nil)
- if err != nil {
- return err
- }
-
- var errDelete DeleteError
- if err := handleReponse(resp, &errDelete); err != nil {
- return err
- }
-
- return nil
-}
-
-func (c *NFS) getShareByID(shareID string) (interface{}, error) {
- url := "/NFSHARE/" + shareID
- resp, err := c.request(url, "GET", nil)
- if err != nil {
- return nil, err
- }
-
- var nfsShare NFSShare
- if err := handleReponse(resp, &nfsShare); err != nil {
- return nil, err
- }
-
- if nfsShare.Data.ID == "" {
- return nil, nil
- }
-
- return &nfsShare.Data, nil
-}
-
-func (c *NFS) allowAccess(shareID, accessTo, accessLevel string) (interface{}, error) {
- url := "/NFS_SHARE_AUTH_CLIENT"
- data := map[string]string{
- "TYPE": "16409",
- "NAME": accessTo,
- "PARENTID": shareID,
- "ACCESSVAL": accessLevel,
- "SYNC": "0",
- "ALLSQUASH": "1",
- "ROOTSQUASH": "0",
- }
-
- resp, err := c.request(url, "Post", data)
- if err != nil {
- return nil, err
- }
-
- var nfsClient NFSShareClient
- if err := handleReponse(resp, &nfsClient); err != nil {
- return nil, err
- }
-
- return &nfsClient, nil
-}
-
-// getLocation
-func (c *NFS) getLocation(sharePath, ipAddr string) string {
- path := strings.Replace(sharePath, "-", "_", -1)
- return fmt.Sprintf("%s:/%s", ipAddr, path)
-}
-
-func (c *NFS) getAccessLevel(accessLevel string) string {
- if accessLevel == AccessLevelRW {
- return AccessNFSRw
- }
- return AccessNFSRo
-}
diff --git a/contrib/drivers/filesharedrivers/oceanstor/oceanclient.go b/contrib/drivers/filesharedrivers/oceanstor/oceanclient.go
deleted file mode 100644
index d6ba12b83..000000000
--- a/contrib/drivers/filesharedrivers/oceanstor/oceanclient.go
+++ /dev/null
@@ -1,480 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oceanstor
-
-import (
- "bytes"
- "crypto/tls"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "runtime"
- "strconv"
- "strings"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/utils/pwd"
-)
-
-func newRestCommon(conf *Config) (*Client, error) {
- pwdCiphertext, err := decrypte(conf)
- if err != nil {
- msg := fmt.Sprintf("decryption failed: %v", err)
- log.Error(msg)
- return nil, err
- }
-
- client := &Client{
- urlPrefix: conf.Endpoint,
- username: conf.Username,
- password: pwdCiphertext,
- header: map[string]string{"Content-Type": "application/json;charset=UTF-8"},
- }
-
- if err := tryTimes(client.login); err != nil {
- msg := fmt.Sprintf("login failed: %v", err)
- log.Error(msg)
- return nil, err
- }
-
- return client, nil
-}
-
-func decrypte(conf *Config) (string, error) {
- var pwdCiphertext = conf.Password
-
- if conf.EnableEncrypted {
- // Decrypte the password
- pwdTool := pwd.NewPwdEncrypter(conf.PwdEncrypter)
- password, err := pwdTool.Decrypter(pwdCiphertext)
- if err != nil {
- return "", err
- }
- pwdCiphertext = password
- }
-
- return pwdCiphertext, nil
-}
-
-func (c *Client) login() error {
- auth, err := c.getAuth()
- if err != nil {
- return err
- }
-
- if auth.AccountState == PwdReset || auth.AccountState == PwdExpired {
- c.logout()
- return errors.New("password has expired or must be reset, please change the password")
- }
-
- if auth.DeviceId == "" {
- c.logout()
- return fmt.Errorf("failed to login with rest URLs %s", c.urlPrefix)
- }
-
- c.urlPrefix += "/" + auth.DeviceId
-
- c.header["Connection"] = "keep-alive"
- c.header["iBaseToken"] = auth.IBaseToken
- c.header["Cookie"] = c.cookie
-
- return nil
-}
-
-func (c *Client) getAuth() (*AuthData, error) {
- data := map[string]string{
- "username": c.username,
- "password": c.password,
- "scope": "0",
- }
-
- url := "/xxxxx/sessions"
- resp, err := c.request(url, "POST", data)
- if err != nil {
- return nil, err
- }
-
- var auth Auth
-
- if err := handleReponse(resp, &auth); err != nil {
- return nil, err
- }
-
- return &auth.Data, nil
-}
-
-type Protocol interface {
- createShare(fsName, fsID string) (interface{}, error)
- getShare(fsName string) (interface{}, error)
- getShareID(share interface{}) string
- deleteShare(shareID string) error
- getShareByID(shareID string) (interface{}, error)
- getLocation(sharePath, ipAddr string) string
- allowAccess(shareID, accessTo, accessLevel string) (interface{}, error)
- getAccessLevel(accessLevel string) string
-}
-
-func NewProtocol(proto string, c *Client) Protocol {
- switch proto {
- case NFSProto:
- return &NFS{Client: c}
- case CIFSProto:
- return &CIFS{Client: c}
- }
-
- return nil
-}
-
-func (c *Client) createFileSystem(name, poolID string, size int64) (*FileSystemData, error) {
- data := map[string]interface{}{
- "PARENTID": poolID,
- "NAME": name,
- "PARENTTYPE": 216,
- "ALLOCTYPE": 1,
- "CAPACITY": Gb2Sector(size),
- }
-
- url := "/filesystem"
- resp, err := c.request(url, "POST", data)
- if err != nil {
- return nil, err
- }
-
- var fileSystem FileSystem
-
- if err := handleReponse(resp, &fileSystem); err != nil {
- return nil, err
- }
-
- return &fileSystem.Data, nil
-}
-
-func (c *Client) deleteFS(fsID string) error {
- url := "/filesystem/" + fsID
- resp, err := c.request(url, "DELETE", nil)
- if err != nil {
- return err
- }
-
- var errDelete DeleteError
- if err := handleReponse(resp, &errDelete); err != nil {
- return err
- }
-
- return nil
-}
-
-func (c *Client) getAllLogicalPort() ([]LogicalPortData, error) {
- url := "/LIF"
- resp, err := c.request(url, "GET", nil)
- if err != nil {
- return nil, err
- }
-
- var logicalPortList LogicalPortList
-
- if err := handleReponse(resp, &logicalPortList); err != nil {
- return nil, err
- }
-
- return logicalPortList.Data, nil
-}
-
-func (c *Client) getFileSystem(fsid string) (*FileSystemData, error) {
- url := "/filesystem/" + fsid
- resp, err := c.request(url, "GET", nil)
- if err != nil {
- return nil, err
- }
-
- var fileSystem FileSystem
- if err := handleReponse(resp, &fileSystem); err != nil {
- return nil, err
- }
-
- return &fileSystem.Data, nil
-}
-
-func (c *Client) getFileSystemByName(name string) ([]FileSystemData, error) {
- url := "/filesystem?filter=NAME::" + name
- resp, err := c.request(url, "GET", nil)
- if err != nil {
- return nil, err
- }
-
- var fsList FileSystemList
-
- if err := handleReponse(resp, &fsList); err != nil {
- return nil, err
- }
-
- return fsList.Data, nil
-}
-
-func (c *Client) ListStoragePools() ([]StoragePool, error) {
- resp, err := c.request("/storagepool", "GET", nil)
- if err != nil {
- return nil, err
- }
-
- var pools StoragePoolList
- if err := handleReponse(resp, &pools); err != nil {
- return nil, err
- }
-
- return pools.Data, nil
-}
-
-func (c *Client) createSnapshot(fsID, snapName string) (*FSSnapshotData, error) {
- data := map[string]string{
- "PARENTTYPE": "40",
- "TYPE": "48",
- "PARENTID": fsID,
- "NAME": strings.Replace(snapName, "-", "_", -1),
- "DESCRIPTION": "",
- }
- url := "/FSSNAPSHOT"
- resp, err := c.request(url, "POST", data)
- if err != nil {
- return nil, err
- }
-
- var fsSnapshot FSSnapshot
- if err := handleReponse(resp, &fsSnapshot); err != nil {
- return nil, err
- }
-
- return &fsSnapshot.Data, nil
-}
-
-func (c *Client) listSnapshots(fsID string) ([]FSSnapshotData, error) {
- url := "/FSSNAPSHOT?sortby=TIMESTAMP,d&range=[0-100]&PARENTID=" + fsID
- resp, err := c.request(url, "GET", nil)
- if err != nil {
- return nil, err
- }
-
- var fsSnapshotList FSSnapshotList
- if err := handleReponse(resp, &fsSnapshotList); err != nil {
- return nil, err
- }
-
- return fsSnapshotList.Data, nil
-}
-
-func (c *Client) deleteFSSnapshot(snapID string) error {
- url := "/FSSNAPSHOT/" + snapID
- resp, err := c.request(url, "DELETE", nil)
- if err != nil {
- return err
- }
-
- var errDelete DeleteError
- if err := handleReponse(resp, &errDelete); err != nil {
- return err
- }
-
- return nil
-}
-
-func (c *Client) showFSSnapshot(snapID string) (*FSSnapshotData, error) {
- url := "/FSSNAPSHOT/" + snapID
- resp, err := c.request(url, "GET", nil)
- if err != nil {
- return nil, err
- }
-
- var fsSnapshot FSSnapshot
- if err := json.Unmarshal(resp, &fsSnapshot); err != nil {
- return nil, err
- }
-
- return &fsSnapshot.Data, nil
-}
-
-func (c *Client) getAllFilesystem() ([]FileSystemData, error) {
- url := "/filesystem"
- resp, err := c.request(url, "GET", nil)
- if err != nil {
- return nil, err
- }
-
- var fsList FileSystemList
- if err := handleReponse(resp, &fsList); err != nil {
- return nil, err
- }
-
- return fsList.Data, nil
-}
-
-func (c *Client) getAccessCount(shareID, shareClientType string) (string, error) {
- url := fmt.Sprintf("/%s/count?filter=PARENTID::%s", shareClientType, shareID)
- resp, err := c.request(url, "GET", nil)
- if err != nil {
- return "", err
- }
-
- var count shareAuthClientCount
- if err := handleReponse(resp, &count); err != nil {
- return "", err
- }
-
- return count.Data.Counter, nil
-}
-
-func (c *Client) getAccessFromShare(shareID, accessTo, shareProto string) (string, error) {
- shareClientType, err := c.getShareClientType(shareProto)
- if err != nil {
- return "", err
- }
-
- count, err := c.getAccessCount(shareID, shareClientType)
- if err != nil {
- return "", err
- }
-
- counter, _ := strconv.Atoi(count)
- rangeBegin := 0
-
- for counter > 0 {
- accessRange, err := c.getAccessFromShareRange(shareID, shareClientType, rangeBegin)
- if err != nil {
- return "", nil
- }
- for _, v := range accessRange {
- if v.Name == accessTo {
- return v.ID, nil
- }
- }
-
- rangeBegin += 100
- counter -= 100
- }
-
- return "", nil
-}
-
-func (c *Client) getAccessFromShareRange(shareID, shareClientType string, rangeBegin int) ([]ShareAuthClientData, error) {
- rangeEnd := rangeBegin + 100
- url := fmt.Sprintf("/%s?filter=PARENTID::%s&range=[%d-%d]", shareClientType, shareID, rangeBegin, rangeEnd)
- resp, err := c.request(url, "GET", nil)
- if err != nil {
- return nil, err
- }
- var shareClientList ShareAuthClientList
-
- if err := handleReponse(resp, &shareClientList); err != nil {
- return nil, err
- }
-
- return shareClientList.Data, nil
-}
-
-func (c *Client) getShareClientType(shareProto string) (string, error) {
- switch shareProto {
- case NFSProto:
- return "NFS_SHARE_AUTH_CLIENT", nil
- case CIFSProto:
- return "CIFS_SHARE_AUTH_CLIENT", nil
-
- }
-
- return "", fmt.Errorf("invalid NAS protocol supplied: %s", shareProto)
-}
-
-func (c *Client) removeAccessFromShare(accessID, shareProto string) error {
- shareClientType, err := c.getShareClientType(shareProto)
- if err != nil {
- return err
- }
-
- url := fmt.Sprintf("/%s/%s", shareClientType, accessID)
- resp, err := c.request(url, "DELETE", nil)
- if err != nil {
- return err
- }
-
- var errDelete DeleteError
- if err := handleReponse(resp, &errDelete); err != nil {
- return err
- }
-
- return nil
-}
-
-func (c *Client) logout() error {
- _, err := c.request("/sessions", "DELETE", nil)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *Client) request(url, method string, reqParams interface{}) ([]byte, error) {
- callUrl := c.urlPrefix + url
- // No verify by SSL
- tr := &http.Transport{
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
- }
- // initialize http client
- client := &http.Client{Transport: tr}
-
- var body []byte
- var err error
- if reqParams != nil {
- body, err = json.Marshal(reqParams)
- if err != nil {
- return nil, fmt.Errorf("failed to marshal the request parameters, url is %s, error is %v", callUrl, err)
- }
- }
-
- req, err := http.NewRequest(strings.ToUpper(method), callUrl, bytes.NewBuffer(body))
- if err != nil {
- return nil, fmt.Errorf("failed to initiate the request, url is %s, error is %v", callUrl, err)
- }
-
- // initiate the header
- for k, v := range c.header {
- req.Header.Set(k, v)
- }
-
- // do the request
- resp, err := client.Do(req)
- if err != nil {
- return nil, fmt.Errorf("process request failed: %v, url is %s", err, callUrl)
- }
- defer resp.Body.Close()
-
- respContent, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, fmt.Errorf("read from response body failed: %v, url is %s", err, callUrl)
- }
-
- if 400 <= resp.StatusCode && resp.StatusCode <= 599 {
- pc, _, line, _ := runtime.Caller(1)
- return nil, fmt.Errorf("return status code is: %s, return content is: %s, error function is: %s, error line is: %s, url is %s",
- strconv.Itoa(resp.StatusCode), string(respContent), runtime.FuncForPC(pc).Name(), strconv.Itoa(line), callUrl)
- }
-
- if c.cookie == "" && resp.Header != nil {
- if cookie := resp.Header.Get("set-cookie"); cookie != "" {
- c.cookie = cookie
- }
- }
-
- return respContent, nil
-}
diff --git a/contrib/drivers/filesharedrivers/oceanstor/oceanstor.go b/contrib/drivers/filesharedrivers/oceanstor/oceanstor.go
deleted file mode 100644
index fdc91a8cf..000000000
--- a/contrib/drivers/filesharedrivers/oceanstor/oceanstor.go
+++ /dev/null
@@ -1,697 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oceanstor
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "os"
- "strconv"
- "time"
-
- log "github.com/golang/glog"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- model "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/opensds/opensds/pkg/utils/config"
- uuid "github.com/satori/go.uuid"
-)
-
-func (d *Driver) Setup() error {
- if d.Client != nil {
- return nil
- }
-
- var err error
-
- d.InitConf()
- cli, err := newRestCommon(d.Config)
- if err != nil {
- msg := fmt.Sprintf("get new client failed: %v", err)
- log.Error(msg)
- return errors.New(msg)
- }
-
- d.Client = cli
- log.Info("get oceanstor client successfully")
-
- return nil
-}
-
-func (d *Driver) InitConf() {
- path := config.CONF.OsdsDock.Backends.HuaweiOceanStorFile.ConfigPath
- if path == "" {
- path = DefaultConfPath
- }
-
- conf := &Config{}
- d.Config = conf
- Parse(conf, path)
-}
-
-func (d *Driver) Unset() error {
-
- if err := d.logout(); err != nil {
- msg := fmt.Sprintf("logout failed: %v", err)
- log.Error(msg)
- return errors.New(msg)
- }
-
- return nil
-}
-
-func (d *Driver) CreateFileShare(opt *pb.CreateFileShareOpts) (*model.FileShareSpec, error) {
- fsName := opt.GetName()
- size := opt.GetSize()
- prf := opt.GetProfile()
- poolID := opt.GetPoolName()
- shareProto := ""
-
- err := d.parameterCheck(poolID, prf, size, &fsName, &shareProto)
- if err != nil {
- log.Error(err.Error())
- return nil, err
- }
-
- // create file system
- fs, err := d.createFileSystemIfNotExist(fsName, poolID, size)
- if err != nil {
- msg := fmt.Sprintf("create file system failed: %v", err)
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- shareDriver := NewProtocol(shareProto, d.Client)
- // create file share if not exist
- shareID, err := d.createShareIfNotExist(fsName, fs.ID, shareDriver)
- if err != nil {
- return nil, err
- }
-
- // get location
- location, err := d.getShareLocation(fsName, shareDriver)
- if err != nil {
- msg := fmt.Sprintf("get share location failed: %v", err)
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- share := &model.FileShareSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Protocols: []string{shareProto},
- Description: opt.GetDescription(),
- Size: size,
- AvailabilityZone: opt.GetAvailabilityZone(),
- PoolId: poolID,
- ExportLocations: location,
- Metadata: map[string]string{FileShareName: fsName, FileShareID: shareID},
- }
- return share, nil
-}
-
-func (d *Driver) parameterCheck(poolID, prf string, size int64, fsName, shareProto *string) error {
- // Parameter check
- if poolID == "" {
- msg := "pool id cannot be empty"
- log.Error(msg)
- return errors.New(msg)
- }
-
- if *fsName == "" {
- log.Infof("use default file system name %s", defaultFileSystem)
- *fsName = defaultFileSystem
- }
-
- proto, err := d.GetProtoFromProfile(prf)
- if err != nil {
- return err
- }
-
- if !checkProtocol(proto) {
- return fmt.Errorf("%s protocol is not supported, support is %s and %s", proto, NFSProto, CIFSProto)
- }
-
- *shareProto = proto
-
- if size == 0 {
- return errors.New("size must be greater than 0")
- }
-
- return nil
-}
-
-func (d *Driver) createShareIfNotExist(fsName, fsID string, shareDriver Protocol) (string, error) {
- sharePath := getSharePath(fsName)
- share, err := shareDriver.getShare(fsName)
- if err != nil {
- return "", fmt.Errorf("get share %s failed: %v", sharePath, err)
- }
-
- if share != nil {
- log.Infof("share %s already exist", sharePath)
- return "", nil
- }
-
- share, err = shareDriver.createShare(fsName, fsID)
- if err != nil {
- shareDriver.deleteShare(fsName)
- return "", fmt.Errorf("create share %s failed: %v", sharePath, err)
- }
-
- log.Infof("create share %s successfully", sharePath)
- return shareDriver.getShareID(share), nil
-}
-
-func (d *Driver) getShareLocation(fsName string, shareDriver Protocol) ([]string, error) {
- logicalPortList, err := d.getAllLogicalPort()
- if err != nil {
- return nil, err
- }
-
- location, err := d.getLocationPath(fsName, logicalPortList, shareDriver)
- if err != nil {
- return nil, err
- }
-
- return location, nil
-}
-
-// createFileSystemIfNotExist
-func (d *Driver) createFileSystemIfNotExist(fsName, poolID string, size int64) (*FileSystemData, error) {
- fsList, err := d.getFileSystemByName(fsName)
- if err != nil {
- return nil, fmt.Errorf("get filesystem %s by name failed: %v", fsName, err)
- }
-
- if len(fsList) == 0 {
- fs, err := d.createFileSystem(fsName, poolID, size)
- if err != nil {
- d.DeleteFileSystem(fsName)
- return nil, fmt.Errorf("create file system %s failed, %v", fsName, err)
- }
-
- err = d.checkFsStatus(fs, poolID)
- if err != nil {
- return nil, err
- }
-
- log.Infof("create filesystem %s successfully", fsName)
- return fs, nil
- }
-
- log.Infof("filesystem %s already exist", fsName)
- return &fsList[0], nil
-}
-
-func (d *Driver) getLocationPath(sharePath string, logicalPortList []LogicalPortData, shareDriver Protocol) ([]string, error) {
- if len(logicalPortList) == 0 {
- return nil, errors.New("cannot find file share server end logical ip")
- }
-
- var location []string
-
- for _, port := range logicalPortList {
- location = append(location, shareDriver.getLocation(sharePath, port.IpAddr))
- }
-
- return location, nil
-}
-
-func (d *Driver) checkFsStatus(fs *FileSystemData, poolID string) error {
- ticker := time.NewTicker(3 * time.Second)
- timeout := time.After(1 * time.Minute)
- var fsStable *FileSystemData
- var err error
- for {
- select {
- case <-ticker.C:
- fsStable, err = d.getFileSystem(fs.ID)
- if err != nil {
- d.DeleteFileSystem(fs.Name)
- return fmt.Errorf("check file system status failed: %v", err)
- }
-
- if fsStable.HealthStatus == StatusFSHealth && fsStable.RunningStatus == StatusFSRunning {
- return nil
- }
-
- case <-timeout:
- d.DeleteFileSystem(fs.Name)
- return fmt.Errorf("timeout occured waiting for checking file system status %s or invalid status health:%s, running:%s", fsStable.ID, fsStable.HealthStatus, fsStable.RunningStatus)
- }
- }
-}
-
-func (d *Driver) ListPools() ([]*model.StoragePoolSpec, error) {
- var pols []*model.StoragePoolSpec
- sp, err := d.ListStoragePools()
- if err != nil {
- msg := fmt.Sprintf("list pools from storage failed: %v", err)
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- c := d.Config
- for _, p := range sp {
- if _, ok := c.Pool[p.Name]; !ok {
- continue
- }
- host, _ := os.Hostname()
- name := fmt.Sprintf("%s:%s:%s", host, d.Endpoint, p.Id)
-
- userTotalCapacity, _ := strconv.ParseInt(p.UserTotalCapacity, 10, 64)
- userFreeCapacity, _ := strconv.ParseInt(p.UserFreeCapacity, 10, 64)
-
- pol := &model.StoragePoolSpec{
- BaseModel: &model.BaseModel{
- Id: uuid.NewV5(uuid.NamespaceOID, name).String(),
- },
- Name: p.Name,
- TotalCapacity: Sector2Gb(userTotalCapacity),
- FreeCapacity: Sector2Gb(userFreeCapacity),
- StorageType: c.Pool[p.Name].StorageType,
- Extras: c.Pool[p.Name].Extras,
- AvailabilityZone: c.Pool[p.Name].AvailabilityZone,
- }
- if pol.AvailabilityZone == "" {
- pol.AvailabilityZone = defaultAZ
- }
- pols = append(pols, pol)
- }
-
- if len(pols) == 0 {
- msg := "pools in configuration file not found"
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- log.Info("list pools successfully")
-
- return pols, nil
-}
-
-func (d *Driver) getFSInfo(fsName string) (*FileSystemData, error) {
- fsList, err := d.getFileSystemByName(fsName)
- if err != nil {
- return nil, fmt.Errorf("get filesystem %s by name failed: %v", fsName, err)
- }
-
- if len(fsList) == 0 {
- return nil, fmt.Errorf("filesystem %s does not exist", fsName)
- }
-
- return &fsList[0], nil
-}
-
-func (d *Driver) GetProtoFromProfile(prf string) (string, error) {
- if prf == "" {
- msg := "profile cannot be empty"
- return "", errors.New(msg)
- }
-
- log.V(5).Infof("file share profile is %s", prf)
- profile := &model.ProfileSpec{}
- err := json.Unmarshal([]byte(prf), profile)
- if err != nil {
- msg := fmt.Sprintf("unmarshal profile failed: %v", err)
- return "", errors.New(msg)
- }
-
- shareProto := profile.ProvisioningProperties.IOConnectivity.AccessProtocol
- if shareProto == "" {
- msg := "file share protocol cannot be empty"
- return "", errors.New(msg)
- }
-
- return shareProto, nil
-}
-
-func (d *Driver) DeleteFileShare(opt *pb.DeleteFileShareOpts) error {
- shareProto, err := d.GetProtoFromProfile(opt.GetProfile())
- if err != nil {
- log.Error(err.Error())
- return err
- }
-
- meta := opt.GetMetadata()
- if meta == nil || (meta != nil && meta[FileShareName] == "" && meta[FileShareID] == "") {
- msg := "cannot get file share name and id"
- log.Error(msg)
- return errors.New(msg)
- }
-
- fsName := meta[FileShareName]
- shareID := meta[FileShareID]
-
- shareDriver := NewProtocol(shareProto, d.Client)
-
- sharePath := getSharePath(fsName)
- if err := shareDriver.deleteShare(shareID); err != nil {
- msg := fmt.Sprintf("delete file share %s failed: %v", sharePath, err)
- log.Error(msg)
- return errors.New(msg)
- }
-
- log.Infof("delete share %s successfully", sharePath)
-
- if err := d.DeleteFileSystem(fsName); err != nil {
- msg := fmt.Sprintf("delete filesystem %s failed: %v", fsName, err)
- log.Error(msg)
- return errors.New(msg)
- }
-
- log.Infof("delete file system %s successfully", fsName)
-
- return nil
-}
-
-func (d *Driver) DeleteFileSystem(fsName string) error {
- fs, err := d.getFSInfo(fsName)
- if err != nil {
- return err
- }
-
- err = d.deleteFS(fs.ID)
- if err != nil {
- return err
- }
-
- log.Infof("delete filesystem %s successfully", fs.ID)
- return nil
-}
-
-func (d *Driver) CreateFileShareSnapshot(opt *pb.CreateFileShareSnapshotOpts) (*model.FileShareSnapshotSpec, error) {
- snapID := opt.GetId()
- if snapID == "" {
- msg := "snapshot id cannot be empty"
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- meta := opt.GetMetadata()
-
- if meta == nil || (meta != nil && meta[FileShareName] == "" && meta[FileShareID] == "") {
- msg := "cannot get file share name and id"
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- fsName := meta[FileShareName]
-
- fs, err := d.getFSInfo(fsName)
- if err != nil {
- msg := fmt.Sprintf("get file system %s failed: %v", fsName, err)
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- if fs == nil {
- msg := fmt.Sprintf("%s does not exist", fsName)
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- snapName := EncodeName(snapID)
-
- fsSnapshot, err := d.createSnapshot(fs.ID, snapName)
- if err != nil {
- msg := fmt.Sprintf("create filesystem snapshot failed: %v", err)
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- snapSize, _ := strconv.ParseInt(fsSnapshot.Capacity, 10, 64)
-
- return &model.FileShareSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: snapName,
- Description: opt.GetDescription(),
- SnapshotSize: snapSize,
- Metadata: map[string]string{FileShareSnapshotID: fsSnapshot.ID},
- }, nil
-}
-
-func (d *Driver) DeleteFileShareSnapshot(opt *pb.DeleteFileShareSnapshotOpts) error {
- meta := opt.GetMetadata()
- if meta == nil || (meta != nil && meta[FileShareSnapshotID] == "") {
- msg := "cannot get file share snapshot id"
- log.Error(msg)
- return errors.New(msg)
- }
-
- snapID := meta[FileShareSnapshotID]
-
- err := d.deleteFSSnapshot(snapID)
- if err != nil {
- msg := fmt.Sprintf("delete filesystem snapshot %s failed, %v", snapID, err)
- log.Error(msg)
- return errors.New(msg)
- }
-
- log.Infof("delete file share snapshot %s successfully", snapID)
-
- return nil
-}
-
-func (d *Driver) getAccessLevel(accessLevels []string, shareProto string) (string, error) {
- var accessLevel string
-
- if accessLevels == nil || (accessLevels != nil && len(accessLevels) == 0) {
- return "", errors.New("access level cannot be empty")
- }
-
- supportAccessLevels := []string{AccessLevelRead, AccessLevelWrite}
-
- if len(accessLevels) > len(supportAccessLevels) {
- return "", errors.New("invalid access level")
- }
-
- accessLevel = "ro"
- for _, v := range accessLevels {
- if !utils.Contained(v, supportAccessLevels) {
- return "", errors.New("only read only or read write access level are supported")
- }
- if v == AccessLevelWrite {
- accessLevel = "rw"
- }
- }
-
- shareDriver := NewProtocol(shareProto, d.Client)
- return shareDriver.getAccessLevel(accessLevel), nil
-}
-
-func (d *Driver) CreateFileShareAclParamCheck(opt *pb.CreateFileShareAclOpts) (string, string, string, string, error) {
- log.V(5).Infof("create file share access client parameters %#v", opt)
- meta := opt.GetMetadata()
-
- if meta == nil || (meta != nil && meta[FileShareName] == "" && meta[FileShareID] == "") {
- msg := "cannot get file share name and id"
- log.Error(msg)
- return "", "", "", "", errors.New(msg)
- }
-
- fsName := meta[FileShareName]
- if fsName == "" {
- return "", "", "", "", errors.New("fileshare name cannot be empty")
- }
-
- shareProto, err := d.GetProtoFromProfile(opt.Profile)
- if err != nil {
- return "", "", "", "", err
- }
-
- if !checkProtocol(shareProto) {
- return "", "", "", "", fmt.Errorf("%s protocol is not supported, support is NFS and CIFS", shareProto)
- }
-
- accessLevels := opt.GetAccessCapability()
-
- accessLevel, err := d.getAccessLevel(accessLevels, shareProto)
- if err != nil {
- return "", "", "", "", err
- }
-
- accessType := opt.Type
- if !checkAccessType(accessType) {
- return "", "", "", "", fmt.Errorf("only access type %s and %s are supported", AccessTypeUser, AccessTypeIp)
- }
- if shareProto == CIFSProto && accessType != AccessTypeUser {
- return "", "", "", "", errors.New("only USER access type is allowed for CIFS shares")
- }
-
- accessTo := opt.GetAccessTo()
- if accessTo == "" {
- return "", "", "", "", errors.New("access client cannot be empty")
- }
-
- if shareProto == NFSProto {
- if accessType == AccessTypeUser {
- accessTo += "@"
- } else {
- accessTo = "*"
- }
- }
-
- return fsName, shareProto, accessLevel, accessTo, nil
-}
-
-// AllowAccess allow access to the share
-func (d *Driver) CreateFileShareAcl(opt *pb.CreateFileShareAclOpts) (*model.FileShareAclSpec, error) {
- shareName, shareProto, accessLevel, accessTo, err := d.CreateFileShareAclParamCheck(opt)
- if err != nil {
- msg := fmt.Sprintf("create fileshare access client failed: %v", err)
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- shareDriver := NewProtocol(shareProto, d.Client)
-
- share, err := shareDriver.getShare(shareName)
- if err != nil {
- log.Error(err.Error())
- return nil, err
- }
-
- if share == nil {
- return nil, fmt.Errorf("share %s does not exist", shareName)
- }
-
- shareID := shareDriver.getShareID(share)
-
- err = d.createAccessIfNotExist(shareID, accessTo, shareProto, accessLevel, shareDriver)
- if err != nil {
- msg := fmt.Sprintf("allow access %s to %s failed %v", accessTo, shareName, err)
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- shareAccess := &model.FileShareAclSpec{
- BaseModel: &model.BaseModel{
- Id: opt.Id,
- },
- AccessTo: accessTo,
- Metadata: map[string]string{FileShareName: shareName},
- }
-
- return shareAccess, nil
-}
-
-func (d *Driver) createAccessIfNotExist(shareID, accessTo, shareProto, accessLevel string, shareDriver Protocol) error {
- // Check if access already exists
- accessID, err := d.getAccessFromShare(shareID, accessTo, shareProto)
- if err != nil {
- return err
- }
-
- if accessID != "" {
- log.Infof("fileshare access %s already exists", accessID)
- return nil
- }
-
- if _, err := shareDriver.allowAccess(shareID, accessTo, accessLevel); err != nil {
- return err
- }
-
- log.Infof("create fileshare access successfully")
-
- return nil
-}
-
-func (d *Driver) DeleteFileShareAcl(opt *pb.DeleteFileShareAclOpts) error {
- shareName, shareProto, accessTo, err := d.DeleteFileShareAclParamCheck(opt)
- if err != nil {
- log.Error(err.Error())
- return err
- }
-
- accessTo = "*"
-
- shareDriver := NewProtocol(shareProto, d.Client)
-
- share, err := shareDriver.getShare(shareName)
- if err != nil {
- log.Error(err.Error())
- return err
- }
-
- if share == nil {
- msg := fmt.Sprintf("share %s does not exist", shareName)
- log.Error(msg)
- return errors.New(msg)
- }
-
- shareID := shareDriver.getShareID(share)
-
- accessID, err := d.getAccessFromShare(shareID, accessTo, shareProto)
- if err != nil {
- msg := fmt.Sprintf("get access from share failed: %v", err)
- log.Error(msg)
- return errors.New(msg)
- }
-
- if accessID == "" {
- msg := fmt.Sprintf("can not get access id from share %s", shareName)
- log.Error(msg)
- return errors.New(msg)
- }
-
- if err := d.removeAccessFromShare(accessID, shareProto); err != nil {
- msg := fmt.Sprintf("remove access from share failed: %v", err)
- log.Error(msg)
- return errors.New(msg)
- }
-
- return nil
-}
-
-func (d *Driver) DeleteFileShareAclParamCheck(opt *pb.DeleteFileShareAclOpts) (string, string, string, error) {
- meta := opt.GetMetadata()
- if meta == nil || (meta != nil && meta[FileShareName] == "") {
- return "", "", "", errors.New("fileshare name cannot be empty when deleting file share access client")
- }
-
- fsName := meta[FileShareName]
-
- shareProto, err := d.GetProtoFromProfile(opt.Profile)
- if err != nil {
- return "", "", "", err
- }
-
- if !checkProtocol(shareProto) {
- return "", "", "", fmt.Errorf("%s protocol is not supported, support is NFS and CIFS", shareProto)
- }
-
- accessType := opt.Type
- if !checkAccessType(accessType) {
- return "", "", "", fmt.Errorf("only access type %s and %s are supported", AccessTypeUser, AccessTypeIp)
- }
- if shareProto == CIFSProto && accessType != AccessTypeUser {
- return "", "", "", fmt.Errorf("only USER access type is allowed for CIFS shares")
- }
-
- accessTo := opt.GetAccessTo()
- if accessTo == "" {
- return "", "", "", errors.New("cannot find access client")
- }
-
- return fsName, shareProto, accessTo, nil
-}
diff --git a/contrib/drivers/filesharedrivers/oceanstor/testdata/oceanstor.yaml b/contrib/drivers/filesharedrivers/oceanstor/testdata/oceanstor.yaml
deleted file mode 100644
index 5bbf75e14..000000000
--- a/contrib/drivers/filesharedrivers/oceanstor/testdata/oceanstor.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-authOptions:
- username: "root"
- password: "Admin@123"
- # Whether to encrypt the password. If enabled, the value of the password must be ciphertext.
- EnableEncrypted: false
- # Encryption and decryption tool. Default value is aes. The decryption tool can only decrypt the corresponding ciphertext.
- PwdEncrypter: "aes"
- uri: "https://0.0.0.0:8088/deviceManager/rest"
-
-pool:
- opensds_file:
- storageType: file
- availabilityZone: fileshare
- extras:
- dataStorage:
- storageAccessCapability: ["Read","Write","Execute"]
- ioConnectivity:
- accessProtocol: NFS
- maxIOPS: 1000
- advanced:
- diskType: SSD
- throughput: 1000
\ No newline at end of file
diff --git a/contrib/drivers/filesharedrivers/oceanstor/util.go b/contrib/drivers/filesharedrivers/oceanstor/util.go
deleted file mode 100644
index 48ea88099..000000000
--- a/contrib/drivers/filesharedrivers/oceanstor/util.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oceanstor
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "reflect"
- "runtime"
- "strings"
- "time"
-
- log "github.com/golang/glog"
-)
-
-func handleReponse(respContent []byte, out interface{}) error {
- if errUnmarshal := json.Unmarshal(respContent, out); errUnmarshal != nil {
- return errUnmarshal
- }
-
- errStruct, err := findSpecifiedStruct("Error", out)
- if err != nil {
- return err
- }
-
- errResult := errStruct.(Error)
-
- if errResult.Description == "" {
- return errors.New("unable to get execution result from response content")
- }
-
- if errResult.Code != 0 {
- return errors.New(errResult.Description)
- }
-
- return nil
-}
-
-// findSpecifiedStruct Non-recursive search a specified structure from a nested structure
-func findSpecifiedStruct(specifiedStructName string, input interface{}) (interface{}, error) {
- if input == nil {
- return nil, errors.New("input cannot be nil")
- }
- if specifiedStructName == "" {
- return nil, errors.New("specified struct name cannot be empty")
- }
-
- var list []reflect.Value
-
- list = append(list, reflect.ValueOf(input))
-
- for len(list) > 0 {
- value := list[0]
- list = append(list[:0], list[1:]...)
- if value.Kind() == reflect.Ptr {
- value = value.Elem()
- }
- if value.Kind() == reflect.Struct {
- if value.Type().Name() == specifiedStructName {
- return value.Interface(), nil
- }
-
- for i := 0; i < value.NumField(); i++ {
- list = append(list, value.Field(i))
- }
- }
- }
-
- return nil, nil
-}
-
-func checkProtocol(proto string) bool {
- proList := []string{NFSProto, CIFSProto}
- for _, v := range proList {
- if v == proto {
- return true
- }
- }
- return false
-}
-
-func getSharePath(shareName string) string {
- sharePath := "/" + strings.Replace(shareName, "-", "_", -1) + "/"
- return sharePath
-}
-
-func checkAccessLevel(accessLevel string) bool {
- accessLevels := []string{AccessLevelRW, AccessLevelRO}
- for _, v := range accessLevels {
- if v == accessLevel {
- return true
- }
- }
-
- return false
-}
-
-func checkAccessType(accessType string) bool {
- accessTypes := []string{AccessTypeUser, AccessTypeIp}
- for _, v := range accessTypes {
- if v == accessType {
- return true
- }
- }
-
- return false
-}
-
-func tryTimes(f func() error) error {
- var err error
-
- pc, _, _, _ := runtime.Caller(1)
- funcName := runtime.FuncForPC(pc).Name()
-
- for i := 1; i <= MaxRetry; i++ {
- log.Infof("try to exec function %s for %d times", funcName, i)
- err = f()
- if err != nil {
- time.Sleep(5 * time.Second)
- continue
- }
- break
- }
-
- if err != nil {
- return fmt.Errorf("exec function %s failed: %v", funcName, err)
- }
- return nil
-}
-
-func Sector2Gb(sec int64) int64 {
- return sec * 512 / UnitGi
-}
-
-func Gb2Sector(gb int64) int64 {
- return gb * UnitGi / 512
-}
-
-func EncodeName(id string) string {
- return NamePrefix + "_" + id
-}
diff --git a/contrib/drivers/filesharedrivers/oceanstor/util_test.go b/contrib/drivers/filesharedrivers/oceanstor/util_test.go
deleted file mode 100644
index fadcdb503..000000000
--- a/contrib/drivers/filesharedrivers/oceanstor/util_test.go
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oceanstor
-
-import (
- "reflect"
- "testing"
-)
-
-var assertTestResult = func(t *testing.T, got, expected interface{}) {
- t.Helper()
- if !reflect.DeepEqual(got, expected) {
- t.Errorf("expected: %v, got: %v\n", expected, got)
- }
-}
-
-func TestHandleResponse(t *testing.T) {
- t.Run("error response", func(t *testing.T) {
- sample :=
- `{
- "data": [
- {
- "ID":"12",
- "IPV4ADDR":"1.2.3.5"
- },
- {
- "ID":"34",
- "IPV4ADDR":"3.4.5.6"
- }
- ],
- "error": {
- "code":3,
- "description":"other error"
- }
- }`
-
- var logicalPortList LogicalPortList
-
- err := handleReponse([]byte(sample), &logicalPortList)
- assertTestResult(t, err.Error(), "other error")
- })
-
- t.Run("normal response", func(t *testing.T) {
- sample :=
- `{
- "data": [
- {
- "ID":"12",
- "IPV4ADDR":"1.2.3.5"
- },
- {
- "ID":"34",
- "IPV4ADDR":"3.4.5.6"
- }
- ],
- "error": {
- "code":0,
- "description":"0"
- }
- }`
-
- var logicalPortList LogicalPortList
-
- err := handleReponse([]byte(sample), &logicalPortList)
- assertTestResult(t, err, nil)
- })
-
- t.Run("no error in response", func(t *testing.T) {
- sample :=
- `{
- "data": [
- {
- "ID":"12",
- "IPV4ADDR":"1.2.3.5"
- },
- {
- "ID":"34",
- "IPV4ADDR":"3.4.5.6"
- }
- ]
- }`
-
- var logicalPortList LogicalPortList
-
- err := handleReponse([]byte(sample), &logicalPortList)
- assertTestResult(t, err.Error(), "unable to get execution result from response content")
- })
-}
-
-func TestFindSpecifiedStruct(t *testing.T) {
- type Sample4 struct {
- Error
- Filed1 bool
- }
-
- type Sample3 struct {
- Sample4
- Filed1 string
- Filed2 int
- }
- type Sample2 struct {
- Filed1 string
- Sample3
- Filed2 int
- }
-
- type Sample1 struct {
- Filed1 string
- Filed2 int
- Sample2
- }
-
- errStruct := Error{
- Code: 1,
- Description: "test error",
- }
-
- sample4 := Sample4{
- Error: errStruct,
- Filed1: false,
- }
-
- sample3 := Sample3{
- Sample4: sample4,
- Filed1: "test3",
- Filed2: 3,
- }
-
- sample2 := Sample2{
- Filed1: "test2",
- Sample3: sample3,
- Filed2: 2,
- }
-
- sample1 := Sample1{
- Filed1: "test1",
- Filed2: 1,
- Sample2: sample2,
- }
-
- t.Run("search substructure named Error from nested structure", func(t *testing.T) {
- result, _ := findSpecifiedStruct("Error", sample1)
- errResult := result.(Error)
- assertTestResult(t, errResult.Description, "test error")
- })
-
- t.Run("search substructure named Sample3 from nested structure", func(t *testing.T) {
- result, _ := findSpecifiedStruct("Sample3", sample1)
- resultStruct := result.(Sample3)
- assertTestResult(t, resultStruct, sample3)
- })
-
- t.Run("search substructure named Sample5 from nested structure", func(t *testing.T) {
- result, _ := findSpecifiedStruct("Sample5", sample1)
- assertTestResult(t, result, nil)
- })
-
- t.Run("search substructure named Sample5 from ptr", func(t *testing.T) {
- result, _ := findSpecifiedStruct("Sample3", &sample1)
- resultStruct := result.(Sample3)
- assertTestResult(t, resultStruct, sample3)
- })
-}
-
-func TestCheckProtocol(t *testing.T) {
- t.Run("protocol is nfs", func(t *testing.T) {
- result := checkProtocol(NFSProto)
- assertTestResult(t, result, true)
- })
-
- t.Run("protocol is cifs", func(t *testing.T) {
- result := checkProtocol(CIFSProto)
- assertTestResult(t, result, true)
- })
-
- t.Run("protocol is test", func(t *testing.T) {
- result := checkProtocol("test")
- assertTestResult(t, result, false)
- })
-}
-
-func TestGetSharePath(t *testing.T) {
- t.Run("get share path", func(t *testing.T) {
- result := getSharePath("fileshare")
- assertTestResult(t, result, "/fileshare/")
- })
-}
-
-func TestCheckAccessLevel(t *testing.T) {
- t.Run("check access level", func(t *testing.T) {
- result := checkAccessLevel("rr")
- assertTestResult(t, result, false)
- })
-
- t.Run("check access level", func(t *testing.T) {
- result := checkAccessLevel("rw")
- assertTestResult(t, result, true)
- })
-
- t.Run("check access level", func(t *testing.T) {
- result := checkAccessLevel("ro")
- assertTestResult(t, result, true)
- })
-}
-
-func TestCheckAccessType(t *testing.T) {
- t.Run("check access level", func(t *testing.T) {
- result := checkAccessType("block")
- assertTestResult(t, result, false)
- })
-
- t.Run("check access level", func(t *testing.T) {
- result := checkAccessType("ip")
- assertTestResult(t, result, true)
- })
-
- t.Run("check access level", func(t *testing.T) {
- result := checkAccessType("user")
- assertTestResult(t, result, true)
- })
-}
-
-func TestTryTimes(t *testing.T) {
- t.Run("test try times", func(t *testing.T) {
- f := func() error {
- return nil
- }
- result := tryTimes(f)
- assertTestResult(t, result, nil)
- })
-}
-
-func TestSector2Gb(t *testing.T) {
- t.Run("test sector to Gb", func(t *testing.T) {
- result := Sector2Gb(12582912)
- assertTestResult(t, result, int64(6))
- })
-}
-
-func TestGb2Sector(t *testing.T) {
- t.Run("test Gb to sector", func(t *testing.T) {
- result := Gb2Sector(6)
- assertTestResult(t, result, int64(12582912))
- })
-}
diff --git a/contrib/drivers/fujitsu/eternus/client.go b/contrib/drivers/fujitsu/eternus/client.go
deleted file mode 100644
index 6880dbcf0..000000000
--- a/contrib/drivers/fujitsu/eternus/client.go
+++ /dev/null
@@ -1,1045 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package eternus
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "strconv"
- "strings"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/utils/pwd"
- "golang.org/x/crypto/ssh"
- yaml "gopkg.in/yaml.v2"
-)
-
-// EternusClient :
-type EternusClient struct {
- user string
- password string
- endpoint string
- stdin io.WriteCloser
- stdout io.Reader
- stderr io.Reader
- cliConfPath string
-}
-
-func NewClient(opt *AuthOptions) (*EternusClient, error) {
- var pwdCiphertext = opt.Password
-
- if opt.EnableEncrypted {
- // Decrypte the password
- pwdTool := pwd.NewPwdEncrypter(opt.PwdEncrypter)
- password, err := pwdTool.Decrypter(pwdCiphertext)
- if err != nil {
- return nil, err
- }
- pwdCiphertext = password
- }
-
- c := &EternusClient{
- user: opt.Username,
- password: pwdCiphertext,
- endpoint: opt.Endpoint,
- cliConfPath: defaultCliConfPath,
- }
- return c, nil
-}
-
-func NewClientForAdmin(opt *AuthOptions) (*EternusClient, error) {
- var pwdCiphertext = opt.AdminPassword
-
- if opt.EnableEncrypted {
- // Decrypte the password
- pwdTool := pwd.NewPwdEncrypter(opt.PwdEncrypter)
- password, err := pwdTool.Decrypter(pwdCiphertext)
- if err != nil {
- return nil, err
- }
- pwdCiphertext = password
- }
-
- c := &EternusClient{
- user: opt.AdminUsername,
- password: pwdCiphertext,
- endpoint: opt.Endpoint,
- cliConfPath: defaultCliConfPath,
- }
- return c, nil
-}
-
-func (c *EternusClient) Destroy() error {
- _, err := c.stdin.Write([]byte("exit\n"))
- return err
-}
-
-func (c *EternusClient) setConfig() *ssh.ClientConfig {
- var defconfig ssh.Config
- defconfig.SetDefaults()
- cipherOrder := defconfig.Ciphers
-
- config := &ssh.ClientConfig{
- User: c.user,
- Auth: []ssh.AuthMethod{
- ssh.Password(c.password),
- },
- HostKeyCallback: ssh.InsecureIgnoreHostKey(),
- }
- config.Ciphers = append(cipherOrder, "3des-cbc")
-
- return config
-}
-
-func (c *EternusClient) createSession(config *ssh.ClientConfig) (*ssh.Session, error) {
- server := c.endpoint
- server = server + ":" + SSHPort
-
- conn, err := ssh.Dial("tcp", server, config)
- if err != nil {
- log.Error("failed to dial: " + err.Error())
- return nil, err
- }
-
- session, err := conn.NewSession()
- if err != nil {
- log.Error("failed to create session: " + err.Error())
- return nil, err
- }
-
- return session, nil
-}
-
-func (c *EternusClient) doRequest(cmd string, param map[string]string) (bytes.Buffer, error) {
- // create command option
- cmdOption := ""
- if param != nil {
- for k, v := range param {
- cmdOption += fmt.Sprintf(" -%s %s ", k, v)
- }
- }
- // execute command
- log.Infof("execute cli. cmd = %s, option = %s", cmd, cmdOption)
- c.stdin.Write([]byte(cmd + cmdOption + "\n"))
- var result bytes.Buffer
- buff := make([]byte, 65535)
-
- // get command output
- for {
- n, err := c.stdout.Read(buff)
- if err != io.EOF && err != nil {
- return result, err
- }
- n, err = result.Write(buff[:n])
-
- // ignore first '\r\nCLI>'
- if result.String() == "\r\nCLI> " {
- continue
- }
- // if error occured or suffix is 'CLI> ', break the loop
- if err == io.EOF || strings.HasSuffix(result.String(), "CLI> ") {
- break
- }
- }
- c.stdin.Write([]byte("\n"))
- return result, nil
-}
-
-func (c *EternusClient) request(cmd string, param map[string]string) ([]map[string]string, error) {
- var b bytes.Buffer
- var err error
- var resultHash []map[string]string
- success := false
- for i := 0; i < 2; i++ {
- b, err = c.doRequest(cmd, param)
- if err == nil {
- resultArray := c.convStringArray(cmd, &b)
- resultHash, err = c.parseResult(cmd, resultArray)
- if err != nil {
- log.Errorf("failed to execute cmd. err = %s, output = %v", err.Error(), resultArray)
- continue
- }
- success = true
- break
- } else {
- log.Errorf("cmd:%s %s\n param:%v", cmd, c.endpoint, param)
- }
- log.Errorf("request %d times error:%v", i+1, err)
- }
- if success == false {
- return resultHash, err
- }
- return resultHash, nil
-}
-
-// requestForadmin is temporary function for snapshot
-// Do not use the function except snapshot
-func (c *EternusClient) requestForAdmin(cmd string, param map[string]string) (bytes.Buffer, error) {
- var b bytes.Buffer
- var err error
- success := false
- for i := 0; i < 2; i++ {
- b, err = c.doRequest(cmd, param)
- if err == nil {
- success = true
- break
- } else {
- log.Errorf("cmd:%s %s\n param:%v", cmd, c.endpoint, param)
- }
- log.Errorf("request %d times error:%v", i+1, err)
- }
-
- if success == false {
- return b, err
- }
-
- for _, s := range strings.Split(b.String(), "\r\n") {
- // ignore empty line(first elem)
- if s == "" {
- continue
- }
- // ignore echo back string
- if strings.HasPrefix(s, "CLI> "+cmd) {
- continue
- }
- // ignore last line and stop parse
- if s == "CLI> " {
- break
- }
- // check error
- if strings.HasPrefix(s, "Error: ") {
- errMsg := fmt.Sprintf("failed to command output = %s", s)
- log.Error(errMsg)
- return b, errors.New(s)
- }
- }
- return b, nil
-}
-
-func (c *EternusClient) login() error {
- config := c.setConfig()
- session, err := c.createSession(config)
- if err != nil {
- log.Error("failed to get session: " + err.Error())
- return err
- }
-
- c.stdin, err = session.StdinPipe()
- if err != nil {
- log.Error("failed to get StdinPipe: " + err.Error())
- return err
- }
-
- c.stdout, err = session.StdoutPipe()
- if err != nil {
- log.Error("failed to get StdoutPipe: " + err.Error())
- return err
- }
-
- c.stderr, err = session.StderrPipe()
- if err != nil {
- log.Error("failed to get StderrPipe: " + err.Error())
- return err
- }
-
- modes := ssh.TerminalModes{
- ssh.ECHO: 0,
- ssh.ECHOCTL: 0,
- ssh.TTY_OP_ISPEED: 115200,
- ssh.TTY_OP_OSPEED: 115200,
- }
-
- err = session.RequestPty("xterm", 80, 1024, modes)
- if err != nil {
- log.Error("failed to request pty: " + err.Error())
- return err
- }
-
- err = session.Shell()
- if err != nil {
- log.Error("failed to get shell: " + err.Error())
- return err
- }
- return nil
-}
-
-func (c *EternusClient) parseResult(cmd string, resultArray [][]string) ([]map[string]string, error) {
- // read cli config file
- yamlConfig, err := ioutil.ReadFile(c.cliConfPath)
- if err != nil {
- log.Error("failed to read cli_response.yml: " + err.Error())
- return nil, err
- }
- // parse yaml
- var config map[string]map[string]([]map[string]interface{})
- err = yaml.Unmarshal(yamlConfig, &config)
-
- // get config for specified cmd
- cmdConfig := config["cli"][strings.Replace(cmd, " ", "_", -1)]
-
- // parse resultArray
- var ret []map[string]string
- resultIndex := 0
- var dataCount int
- for _, v := range cmdConfig {
- switch v["type"] {
- case "status":
- var status int
- // check for correct response
- if len(resultArray) > resultIndex {
- // get response status
- if len(resultArray[resultIndex]) == 1 {
- status, _ = strconv.Atoi(resultArray[resultIndex][0])
- } else {
- return nil, errors.New("error response. Failed to get status")
- }
- } else {
- return nil, errors.New("error response. Failed to get status")
- }
- // check response status
- if status != 0 {
- if len(resultArray) > (resultIndex+1) &&
- len(resultArray[resultIndex+1]) == 1 {
- errorCode := map[string]string{
- "errorCode": resultArray[resultIndex+1][0],
- }
- ret = append(ret, errorCode)
- }
- return ret, errors.New("error response. Command execute error")
- }
- resultIndex++
- case "count":
- // check for correct response
- if len(resultArray) > resultIndex {
- // get data count
- if len(resultArray[resultIndex]) == 1 {
- tmpCount, _ := strconv.ParseInt(resultArray[resultIndex][0], 16, 64)
- dataCount = int(tmpCount)
- } else {
- fmt.Println(resultArray[resultIndex])
- return nil, errors.New("error response. Failed to get count")
- }
- } else {
- return nil, errors.New("error response. Failed to get count")
- }
- if v["if_zero_skip_all"] == true && dataCount == 0 {
- break
- }
- resultIndex++
- case "data":
- // check drop flag
- if v["drop"] == true {
- resultIndex++
- continue
- }
- // check for correct response
- if len(resultArray) > resultIndex {
- // get single data
- datas := v["data"].([]interface{})
- result, err := c.getData(datas, resultArray[resultIndex])
- if err != nil {
- return nil, err
- }
- ret = append(ret, result)
- resultIndex++
- } else {
- return nil, errors.New("error response. Failed to get data")
- }
- case "multiple_data":
- // get multiple data, data count = dataCount variable
- datas := v["data"].([]interface{})
- for j := 0; j < dataCount; j++ {
- // check drop flag
- if v["drop"] == true {
- resultIndex++
- continue
- }
- if len(resultArray) > resultIndex {
- result, err := c.getData(datas, resultArray[resultIndex])
- if err != nil {
- return nil, err
- }
- ret = append(ret, result)
- resultIndex++
- } else {
- return nil, errors.New("error response. Failed to get multiple_data")
- }
- }
- }
- }
- return ret, nil
-}
-
-func (c *EternusClient) getData(datas []interface{}, result []string) (map[string]string, error) {
- data := map[string]string{}
- for i, v := range datas {
- // store each param
- paramName := v.(map[interface{}]interface{})["name"].(string)
- if len(result) > i {
- data[paramName] = result[i]
- } else {
- errMsg := "the response is inconsistent with the response def"
- return nil, errors.New(errMsg)
- }
- }
- return data, nil
-}
-
-func (c *EternusClient) convStringArray(cmd string, result *bytes.Buffer) [][]string {
- output := [][]string{}
- for _, s := range strings.Split(result.String(), "\r\n") {
- // ignore empty line(first elem)
- if s == "" {
- continue
- }
- // ignore echo back string
- if strings.HasPrefix(s, "CLI> "+cmd) {
- continue
- }
- // ignore last line and stop parse
- if s == "CLI> " {
- break
- }
- output = append(output, strings.Split(s, "\t"))
- }
- return output
-}
-
-// ListStoragePools :
-func (c *EternusClient) ListStoragePools() ([]StoragePool, error) {
- var pools []StoragePool
- ret, err := c.request("show thin-pro-pools", nil)
- if err != nil {
- return pools, err
- }
- for _, v := range ret {
- id, _ := strconv.ParseInt(v["tpp_number"], 16, 64)
- // calculate free capacity
- // cut off after the decimal point
- totalCapacity, _ := strconv.ParseInt(v["total_capacity"], 16, 64)
- usedCapacity, _ := strconv.ParseInt(v["used_capacity"], 16, 64)
- totalCapacity = totalCapacity / LBASize
- usedCapacity = usedCapacity / LBASize
- freeCapacity := totalCapacity - usedCapacity
-
- pool := StoragePool{}
- pool.Id = strconv.FormatInt(id, 10)
- pool.Name = v["tpp_name"]
- pool.TotalCapacity = totalCapacity
- pool.FreeCapacity = freeCapacity
- pools = append(pools, pool)
- }
- return pools, nil
-}
-
-// ListAllStoragePools :
-func (c *EternusClient) ListAllStoragePools() ([]StoragePool, error) {
- return c.ListStoragePools()
-}
-
-// GetVolume :
-func (c *EternusClient) GetVolume(lunID string) (Volume, error) {
- param := map[string]string{
- "volume-number": lunID,
- }
- return c.execGetVolume(param)
-}
-
-// GetVolumeByName :
-func (c *EternusClient) GetVolumeByName(name string) (Volume, error) {
- param := map[string]string{
- "volume-name": name,
- }
- return c.execGetVolume(param)
-}
-
-func (c *EternusClient) execGetVolume(param map[string]string) (Volume, error) {
- var vol Volume
- ret, err := c.request("show volumes", param)
- if err != nil {
- log.Error("failed to get volume information: " + err.Error())
- return vol, err
- }
- v := ret[0]
- id, _ := strconv.ParseInt(v["volume_number"], 16, 64)
- poolID, _ := strconv.ParseInt(v["pool_number"], 16, 64)
- size, _ := strconv.ParseInt(v["size"], 16, 64)
- size = size / LBASize
-
- vol.Id = strconv.FormatInt(id, 10)
- vol.Name = v["volume_name"]
- vol.Size = size
- vol.Status = v["status"]
- vol.PoolName = v["pool_name"]
- vol.PoolId = strconv.FormatInt(poolID, 10)
-
- return vol, nil
-}
-
-// CreateVolume :
-func (c *EternusClient) CreateVolume(id string, size int64, desc string,
- poolName string, provPolicy string) (Volume, error) {
-
- // use hash value because eternus has limitation of name length
- name := GetFnvHash(id)
- sizeGB := fmt.Sprintf("%dgb", size)
- allocation := "thin"
- if provPolicy != "Thin" {
- allocation = "thick"
- }
- param := map[string]string{
- "name": name,
- "size": sizeGB,
- "pool-name": poolName,
- "type": "tpv",
- "allocation": allocation,
- }
- var vol Volume
- ret, err := c.request(CreateVolume, param)
- if err != nil {
- log.Error("failed to create volume: " + err.Error())
- return vol, err
- }
-
- v := ret[0]
- convID, _ := strconv.ParseInt(v["volume_number"], 16, 64)
- vol.Id = strconv.FormatInt(convID, 10)
- vol.Name = name
- vol.Size = size
-
- return vol, nil
-}
-
-// DeleteVolume :
-func (c *EternusClient) DeleteVolume(volumeNumber string) error {
- param := map[string]string{
- "volume-number": volumeNumber,
- }
- _, err := c.request("delete volume", param)
- if err != nil {
- log.Error("failed to delete volume: " + err.Error())
- return err
- }
- return nil
-}
-
-// ExtendVolume :
-func (c *EternusClient) ExtendVolume(volumeNumber string, size int64) error {
- param := map[string]string{
- "volume-number": volumeNumber,
- "size": strconv.FormatInt(size, 10) + "gb",
- }
-
- _, err := c.request("expand volume", param)
- if err != nil {
- log.Error("failed to expand volume: " + err.Error())
- return err
- }
- return nil
-}
-
-// AddIscsiHostWithCheck :
-func (c *EternusClient) AddIscsiHostWithCheck(name string, iscsiName string, ipAddr string) (string, bool, error) {
- // check duplicate host. if already exists, retuen exist host id.
- ret, err := c.request("show host-iscsi-names", nil)
- if err != nil {
- log.Error("failed to show host-iscsi-names: " + err.Error())
- return "", false, err
- }
- for _, v := range ret {
- ipStr := ""
- if v["ip_version"] == "00" {
- ipStr = ParseIPv4(v["host_iscsi_ip_address"])
- } else {
- ipStr = ParseIPv6(v["host_iscsi_ip_address"])
- }
- if v["host_iscsi_name"] == iscsiName && EqualIP(ipStr, ipAddr) {
- hostNumber, _ := strconv.ParseInt(v["host_iscsi_number"], 16, 64)
- return strconv.FormatInt(hostNumber, 10), true, nil
- }
- }
-
- // create new host
- ipVersion := "ipv4"
- if !IsIPv4(ipAddr) {
- ipVersion = "ipv6"
- }
- param := map[string]string{
- "iscsi-name": iscsiName,
- "ip-version": ipVersion,
- "name": name,
- }
- if ipAddr != "" {
- param["ip"] = ipAddr
- }
- ret, err = c.request("create host-iscsi-name", param)
- if err != nil {
- log.Error("failed to create host-iscsi-name: " + err.Error())
- return "", false, err
- }
- v := ret[0]
- hostNumber, _ := strconv.ParseInt(v["host_number"], 16, 64)
- return strconv.FormatInt(hostNumber, 10), false, nil
-}
-
-// DeleteIscsiHost :
-func (c *EternusClient) DeleteIscsiHostByName(name string) error {
- param := map[string]string{
- "host-name": name,
- }
- ret, err := c.request("delete host-iscsi-name", param)
- if err != nil {
- if len(ret) == 1 && ret[0]["errorCode"] == NotFound {
- log.Info("target iscsi host already deleted")
- return nil
- }
- log.Error("failed to delete host-iscsi-name: " + err.Error())
- return err
- }
- return nil
-}
-
-// GetLunGroupByName :
-func (c *EternusClient) GetLunGroupByName(name string) (LunGroup, error) {
- lunGroup := LunGroup{}
- param := map[string]string{
- "lg-name": name,
- }
- ret, err := c.request("show lun-group", param)
- if err != nil {
- log.Error("failed to show lun-group: " + err.Error())
- return lunGroup, err
- }
- lunGroupVolumes := []LunGroupVolume{}
- for _, v := range ret {
- vol := LunGroupVolume{}
- volID, _ := strconv.ParseInt(v["volume_no"], 16, 64)
- hostLunID, _ := strconv.ParseInt(v["lun"], 16, 64)
- tmpSize, _ := strconv.ParseInt(v["total_capacity"], 16, 64)
- size := tmpSize / LBASize
-
- vol.Id = strconv.FormatInt(volID, 10)
- vol.Name = v["volume_name"]
- vol.RawStatus = v["volume_raw_status"]
- vol.RoundStatus = v["volume_round_status"]
- vol.Size = size
- vol.Uid = v["uid"]
- vol.Lun = strconv.FormatInt(hostLunID, 10)
-
- lunGroupVolumes = append(lunGroupVolumes, vol)
- }
- lunGroup.Volumes = lunGroupVolumes
- return lunGroup, nil
-}
-
-// AddLunGroupWithCheck :
-func (c *EternusClient) AddLunGroupWithCheck(lgName string, lunID string) (string, error) {
- // check lunGrp
- ret, err := c.request("show lun-groups", nil)
- if err != nil {
- log.Error("failed to show lun-groups: " + err.Error())
- return "", err
- }
- lgNumberStr := ""
- for _, v := range ret {
- if v["lun_group_name"] == lgName {
- lgNumber, _ := strconv.ParseInt(v["lun_group_no"], 10, 64)
- lgNumberStr = strconv.FormatInt(lgNumber, 10)
- break
- }
- }
- // if already exists for the target host, add volume to the lunGrp.
- if lgNumberStr != "" {
- param := map[string]string{
- "volume-number": lunID,
- "lg-number": lgNumberStr,
- }
- ret, err = c.request("set lun-group", param)
- if err != nil {
- log.Error("failed to set lun-group: " + err.Error())
- return "", err
- }
- return lgNumberStr, nil
- }
-
- // if does not exists for the target host, create new lunGrp.
- lun := "0"
- param := map[string]string{
- "name": lgName,
- "volume-number": lunID,
- "lun": lun,
- }
- ret, err = c.request("create lun-group", param)
- if err != nil {
- log.Error("failed to create lun-group: " + err.Error())
- return "", err
- }
- v := ret[0]
- lunNumber, _ := strconv.ParseInt(v["lun_group_number"], 16, 64)
- return strconv.FormatInt(lunNumber, 10), nil
-}
-
-// RemoveVolumeFromLunGroup :
-func (c *EternusClient) RemoveVolumeFromLunGroup(lunID string, lgName string) error {
- param := map[string]string{
- "lg-name": lgName,
- "lun": lunID,
- }
- _, err := c.request("delete lun-group", param)
- if err != nil {
- log.Error("failed to remove volume from lun-group: " + err.Error())
- return err
- }
- return nil
-}
-
-// DeleteLunGroup :
-func (c *EternusClient) DeleteLunGroupByName(lgName string) error {
- param := map[string]string{
- "lg-name": lgName,
- }
- ret, err := c.request("delete lun-group", param)
- if err != nil {
- if len(ret) == 1 && ret[0]["errorCode"] == NotFound {
- log.Info("target lun group already deleted")
- return nil
- }
- log.Error("failed to delete lun-group: " + err.Error())
- return err
- }
- return nil
-}
-
-// GetIscsiPortInfo :
-func (c *EternusClient) GetIscsiPortInfo(ceSupport bool, needHostAffinity bool) (IscsiPortInfo, error) {
- portInfo := IscsiPortInfo{}
- // select port
- ret, err := c.request("show iscsi-parameters", nil)
- if err != nil {
- log.Error("failed to get iscsi-parameters: " + err.Error())
- return portInfo, err
- }
-
- usePort, portNumber := c.getConnectionPort(ret, ceSupport, needHostAffinity)
- if portNumber == "" {
- msg := "there is no iscsi port."
- log.Error(msg)
- return portInfo, errors.New(msg)
- }
-
- tcpPort, _ := strconv.ParseInt(usePort["tcp_port_number"], 16, 64)
- isnsPort, _ := strconv.ParseInt(usePort["isns_server_port"], 16, 64)
- portInfo.PortNumber = portNumber
- portInfo.IscsiName = usePort["iscsi_name"]
- portInfo.Ip = usePort["ip_address"]
- portInfo.TcpPort = int(tcpPort)
- portInfo.IsnsServerIp = usePort["isns_server_ip"]
- portInfo.IsnsServerPort = int(isnsPort)
-
- return portInfo, nil
-}
-
-// GetFcPortInfo :
-func (c *EternusClient) GetFcPortInfo(ceSupport bool, needHostAffinity bool) (FcPortInfo, error) {
- portInfo := FcPortInfo{}
- // select port
- ret, err := c.request("show fc-parameters", nil)
- if err != nil {
- log.Error("failed to get fc-parameters: " + err.Error())
- return portInfo, err
- }
- usePort, portNumber := c.getConnectionPort(ret, ceSupport, needHostAffinity)
-
- if portNumber == "" {
- msg := "there is no fc port."
- log.Error(msg)
- return portInfo, errors.New(msg)
- }
- portInfo.PortNumber = portNumber
- portInfo.Wwpn = usePort["wwpn"]
-
- return portInfo, nil
-}
-
-func (c *EternusClient) getConnectionPort(portList []map[string]string,
- ceSupport bool, needHostAffinity bool) (map[string]string, string) {
- port := ""
- usePort := map[string]string{}
- for _, v := range portList {
- // if port_mode is not "CA" and "CA/RA", skip
- if v["port_mode"] != "00" && v["port_mode"] != "04" {
- continue
- }
- if v["host_affinity"] == "00" && needHostAffinity {
- usePort = v
- break
- } else if v["host_affinity"] != "00" && !needHostAffinity {
- usePort = v
- break
- }
- }
- if len(usePort) == 0 {
- return usePort, port
- }
- if ceSupport {
- port = GetPortNumberV2(usePort["ca_module_id"], usePort["port_number"])
- } else {
- port = GetPortNumber(usePort["ca_module_id"], usePort["port_number"])
- }
- return usePort, port
-}
-
-// AddHostAffinity :
-func (c *EternusClient) AddHostAffinity(lunGrpID string, hostID string, iscsiPort string) (string, error) {
- // create new host affinity
- param := map[string]string{
- "port": iscsiPort,
- "lg-number": lunGrpID,
- "host-number": hostID,
- }
- ret, err := c.request("set host-affinity", param)
- if err != nil {
- log.Error("failed to set host-affinity: " + err.Error())
- return "", err
- }
- v := ret[0]
- lunMaskGroupNo, _ := strconv.ParseInt(v["lun_mask_group_no"], 16, 64)
- return strconv.FormatInt(lunMaskGroupNo, 10), nil
-}
-
-// DeleteHostAffinity :
-func (c *EternusClient) DeleteHostAffinity(portNumber string, hostname string) error {
- param := map[string]string{
- "port": portNumber,
- "host-name": hostname,
- "mode": "all",
- }
- ret, err := c.request("release host-affinity", param)
- if err != nil {
- if len(ret) == 1 && ret[0]["errorCode"] == NotFound {
- log.Info("target host affinity already deleted")
- return nil
- }
- log.Error("failed to release host-affinity: " + err.Error())
- return err
- }
- return nil
-}
-
-// GetHostLunID :
-func (c *EternusClient) GetHostLunID(lunGrpID string, lunID string) (string, error) {
- param := map[string]string{
- "lg-number": lunGrpID,
- }
- ret, err := c.request("show lun-group", param)
- if err != nil {
- log.Error("failed to get lun-group: " + err.Error())
- return "", err
- }
- var hostLunID int64
- for _, v := range ret {
- volID, _ := strconv.ParseInt(v["volume_no"], 16, 64)
- if strconv.FormatInt(volID, 10) == lunID {
- hostLunID, _ = strconv.ParseInt(v["lun"], 16, 64)
- }
- }
- return strconv.FormatInt(hostLunID, 10), nil
-}
-
-// AddFcHostWithCheck :
-func (c *EternusClient) AddFcHostWithCheck(name string, wwnName string) (string, bool, error) {
- // check duplicate host. if already exists, retuen exist host id.
- ret, err := c.request("show host-wwn-names", nil)
- if err != nil {
- log.Error("failed to show host-wwn-names: " + err.Error())
- return "", false, err
- }
- for _, v := range ret {
- if strings.ToUpper(v["host_wwn_name"]) == strings.ToUpper(wwnName) {
- hostNumber, _ := strconv.ParseInt(v["host_wwn_no"], 16, 64)
- return strconv.FormatInt(hostNumber, 10), true, nil
- }
- }
-
- // create new host
- param := map[string]string{
- "wwn": wwnName,
- "name": name,
- }
- ret, err = c.request("create host-wwn-name", param)
- if err != nil {
- log.Error("failed to create host-wwn-name: " + err.Error())
- return "", true, err
- }
- v := ret[0]
- hostNumber, _ := strconv.ParseInt(v["host_number"], 16, 64)
- return strconv.FormatInt(hostNumber, 10), false, nil
-}
-
-// DeleteFcHost :
-func (c *EternusClient) DeleteFcHostByName(name string) error {
- param := map[string]string{
- "host-name": name,
- }
- _, err := c.request("delete host-wwn-name", param)
- if err != nil {
- log.Error("failed to delete host-wwn-name: " + err.Error())
- return err
- }
- return nil
-}
-
-// ListMapping :
-func (c *EternusClient) ListMapping(port string) ([]Mapping, error) {
- mappings := []Mapping{}
- param := map[string]string{
- "port": port,
- }
- ret, err := c.request("show mapping", param)
- if err != nil {
- log.Error("failed to show mapping: " + err.Error())
- return nil, err
- }
-
- for _, v := range ret {
- lun, _ := strconv.ParseInt(v["lun"], 16, 64)
- volID, _ := strconv.ParseInt(v["volume_number"], 16, 64)
- tmpSize, _ := strconv.ParseInt(v["volume_size"], 16, 64)
- size := tmpSize / LBASize
- tmpMap := Mapping{}
- tmpMap.Lun = strconv.FormatInt(lun, 10)
- tmpMap.VolumeNumber = strconv.FormatInt(volID, 10)
- tmpMap.VolumeName = v["volume_name"]
- tmpMap.VolumeRawStatus = v["volume_raw_status"]
- tmpMap.VolumeRoundStatus = v["volume_round_status"]
- tmpMap.VolumeSize = size
- mappings = append(mappings, tmpMap)
- }
- return mappings, nil
-}
-
-// AddMapping :
-func (c *EternusClient) AddMapping(lunID string, hostLunID string, port string) error {
- param := map[string]string{
- "port": port,
- "volume-number": lunID,
- "lun": hostLunID,
- }
- _, err := c.request("set mapping", param)
- if err != nil {
- log.Error("failed to set mapping: " + err.Error())
- return err
- }
- return nil
-}
-
-// DeleteMapping :
-func (c *EternusClient) DeleteMapping(hostLunID string, Port string) error {
- param := map[string]string{
- "port": Port,
- "lun": hostLunID,
- }
- _, err := c.request("release mapping", param)
- if err != nil {
- log.Error("failed to release mapping: " + err.Error())
- return err
- }
- return nil
-}
-
-// CreateSnapshot is for admin role
-func (c *EternusClient) CreateSnapshot(srcLunID string, destLunID string) error {
- param := map[string]string{
- "source-volume-number": srcLunID,
- "destination-volume-number": destLunID,
- }
- _, err := c.request("start advanced-copy", param)
- if err != nil {
- log.Error("failed to start advanced-copy: " + err.Error())
- return err
- }
- return nil
-}
-
-// ListSnapshot is for admin role
-func (c *EternusClient) ListSnapshot() ([]SnapShot, error) {
- param := map[string]string{
- "type": "sopc+",
- }
- cmd := "show advanced-copy-sessions"
- ret, err := c.requestForAdmin(cmd, param)
- if err != nil {
- log.Error("failed to show advanced-copy-sessions: " + err.Error())
- return nil, err
- }
- output := [][]string{}
- for i, s := range strings.Split(ret.String(), "\r\n") {
- // ignore empty line(first elem)
- if i < 5 {
- continue
- }
- // ignore last line and stop parse
- if s == "CLI> " {
- break
- }
- output = append(output, strings.Split(s, " "))
- }
- snapshotList := []SnapShot{}
- for _, v := range output {
- sp := []string{}
- snapshot := SnapShot{}
- for _, e := range v {
- if e != "" {
- sp = append(sp, e)
- }
- }
- snapshot.Sid = sp[0]
- snapshot.Gen = sp[1]
- snapshot.GenTotal = sp[2]
- snapshot.Type = sp[3]
- snapshot.VolumeType = sp[4]
- snapshot.SrcNo = sp[5]
- snapshot.SrcName = sp[6]
- snapshot.DestNo = sp[7]
- snapshot.DestName = sp[8]
- snapshot.Status = sp[9]
- snapshot.Phase = sp[10]
- snapshot.ErrorCode = sp[11]
- snapshot.Requestor = sp[12]
- snapshotList = append(snapshotList, snapshot)
- }
- return snapshotList, nil
-}
-
-// DeleteSnapshot is for admin role
-func (c *EternusClient) DeleteSnapshot(sid string) error {
- param := map[string]string{
- "session-id": sid,
- }
- _, err := c.requestForAdmin("stop advanced-copy", param)
- if err != nil {
- log.Error("failed to stop advanced-copy: " + err.Error())
- errID := strings.Split(err.Error(), " ")[1]
- if errID == ("E" + NotFound) {
- log.Info("target snapshot session already deleted. Ignore the error.")
- return nil
- }
- return err
- }
- return nil
-}
diff --git a/contrib/drivers/fujitsu/eternus/client_test.go b/contrib/drivers/fujitsu/eternus/client_test.go
deleted file mode 100644
index c16fd8797..000000000
--- a/contrib/drivers/fujitsu/eternus/client_test.go
+++ /dev/null
@@ -1,751 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package eternus
-
-import (
- "bytes"
- "io"
- "testing"
-
- mock "github.com/stretchr/testify/mock"
-)
-
-// MockWriteCloser
-type MockWriteCloser struct {
- mock.Mock
-}
-
-// MockWriteCloser function
-func (_m *MockWriteCloser) Write(in []byte) (int, error) {
- ret := _m.Called(in)
- return ret.Get(0).(int), ret.Error(1)
-}
-func (_m *MockWriteCloser) Close() error {
- ret := _m.Called()
- return ret.Error(0)
-}
-
-// MockWriteCloser
-type MockReadCloser struct {
- mock.Mock
-}
-
-// MockWriteCloser function
-func (_m *MockReadCloser) Read(in []byte) (int, error) {
- ret := _m.Called(in)
- out := ret.Get(2).([]byte)
- for i, v := range out {
- in[i] = v
- }
- return ret.Get(0).(int), ret.Error(1)
-}
-func (_m *MockReadCloser) Close() error {
- ret := _m.Called()
- return ret.Error(0)
-}
-
-func TestNewClient(t *testing.T) {
- authOptions := &AuthOptions{
- Username: "testuser",
- Password: "testpassword",
- AdminUsername: "testadminuser",
- AdminPassword: "testadminpassword",
- PwdEncrypter: "",
- EnableEncrypted: false,
- Endpoint: "endpoint",
- Insecure: false,
- }
- c, err := NewClient(authOptions)
- if err != nil {
- t.Error("Test NewClient failed")
- }
- if c.user != authOptions.Username {
- t.Error("Test NewClient failed")
- }
- if c.password != authOptions.Password {
- t.Error("Test NewClient failed")
- }
- if c.endpoint != authOptions.Endpoint {
- t.Error("Test NewClient failed")
- }
-}
-
-func TestNewClient_Encryption(t *testing.T) {
- authOptions := &AuthOptions{
- Username: "testuser",
- Password: "d82cf4dd2523237a240b4d400e3bef67c694f1afce5d96a09e10cd7f",
- AdminUsername: "testadminuser",
- AdminPassword: "testadminpassword",
- PwdEncrypter: "aes",
- EnableEncrypted: true,
- Endpoint: "endpoint",
- Insecure: false,
- }
- c, err := NewClient(authOptions)
- if err != nil {
- t.Error("Test NewClient failed")
- }
- if c.user != authOptions.Username {
- t.Error("Test NewClient failed")
- }
- if c.password != "testpassword" {
- t.Error("Test NewClient failed")
- }
- if c.endpoint != authOptions.Endpoint {
- t.Error("Test NewClient failed")
- }
-}
-
-func TestNewClient_Error(t *testing.T) {
- authOptions := &AuthOptions{
- Username: "testuser",
- Password: "d",
- AdminUsername: "testadminuser",
- AdminPassword: "testadminpassword",
- PwdEncrypter: "aes",
- EnableEncrypted: true,
- Endpoint: "endpoint",
- Insecure: false,
- }
- _, err := NewClient(authOptions)
- if err == nil {
- t.Error("Test NewClient failed")
- }
-}
-
-func TestNewClientForAdmin(t *testing.T) {
- authOptions := &AuthOptions{
- Username: "testuser",
- Password: "testpassword",
- AdminUsername: "testadminuser",
- AdminPassword: "testadminpassword",
- PwdEncrypter: "",
- EnableEncrypted: false,
- Endpoint: "endpoint",
- Insecure: false,
- }
- c, err := NewClientForAdmin(authOptions)
- if err != nil {
- t.Error("Test NewClientForAdmin failed")
- }
- if c.user != authOptions.AdminUsername {
- t.Error("Test NewClientForAdmin failed")
- }
- if c.password != authOptions.AdminPassword {
- t.Error("Test NewClientForAdmin failed")
- }
- if c.endpoint != authOptions.Endpoint {
- t.Error("Test NewClientForAdmin failed")
- }
-}
-
-func TestNewClientForAdmin_Encryption(t *testing.T) {
- authOptions := &AuthOptions{
- Username: "testuser",
- Password: "d82cf4dd2523237a240b4d400e3bef67c694f1afce5d96a09e10cd7f",
- AdminUsername: "testadminuser",
- AdminPassword: "ac867dd8ca873e2285c69c2f9678c13945cad4471992919cfb38345052797b7f83",
- PwdEncrypter: "aes",
- EnableEncrypted: true,
- Endpoint: "endpoint",
- Insecure: false,
- }
- c, err := NewClientForAdmin(authOptions)
- if err != nil {
- t.Error("Test NewClientForAdmin failed")
- }
- if c.user != authOptions.AdminUsername {
- t.Error("Test NewClientForAdmin failed")
- }
- if c.password != "testadminpassword" {
- t.Error("Test NewClientForAdmin failed")
- }
- if c.endpoint != authOptions.Endpoint {
- t.Error("Test NewClientForAdmin failed")
- }
-}
-
-func TestNewClientForAdmin_Error(t *testing.T) {
- authOptions := &AuthOptions{
- Username: "testuser",
- Password: "d",
- AdminUsername: "testadminuser",
- AdminPassword: "testadminpassword",
- PwdEncrypter: "aes",
- EnableEncrypted: true,
- Endpoint: "endpoint",
- Insecure: false,
- }
- _, err := NewClientForAdmin(authOptions)
- if err == nil {
- t.Error("Test NewClientForAdmin failed")
- }
-}
-
-func TestDestroy(t *testing.T) {
- // create mock
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write", []byte{0x65, 0x78, 0x69, 0x74, 0xa}).Return(1, nil)
-
- client := &EternusClient{
- user: "testuser",
- password: "testpassword",
- endpoint: "testendpoint",
- stdin: mockWriteCloser,
- cliConfPath: "./config/cli_response.yml",
- }
-
- err := client.Destroy()
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestSetConfig(t *testing.T) {
- client := &EternusClient{
- user: "testuser",
- password: "testpassword",
- endpoint: "testendpoint",
- cliConfPath: "./config/cli_response.yml",
- }
-
- config := client.setConfig()
- if config.User != client.user {
- t.Error("Test setConfig failed")
- }
-}
-
-func TestDoRequest(t *testing.T) {
- cmd := "show test"
- param := map[string]string{
- "a": "arg1",
- }
- execCmd := "show test -a arg1 \n"
- // create stdin mock
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil)
- mockWriteCloser.On("Write", []byte("\n")).Return(1, nil)
- // create stdout mock
- mockReadCloser := new(MockReadCloser)
- expectStr := "\r\nCLI> show test -a arg1\r\n00\r\nCLI> "
- buff := make([]byte, 65535)
- outStr := "\r\nCLI> "
- out := []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out)
- buff2 := make([]byte, 65535)
- for i, v := range out {
- buff2[i] = v
- }
- outStr = "show test -a arg1\r\n00\r\nCLI> "
- out = []byte(outStr)
- mockReadCloser.On("Read", buff2).Return(len(out), nil, out)
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- ret, err := client.doRequest(cmd, param)
- if err != nil {
- t.Error("Test doRequest failed")
- }
- if ret.String() != expectStr {
- t.Error("Test doRequest failed")
- }
-}
-
-func TestDoRequest_Error(t *testing.T) {
- cmd := "show test"
- param := map[string]string{
- "a": "arg1",
- }
- execCmd := "show test -a arg1 \n"
- // create mock
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil)
- mockWriteCloser.On("Write", []byte("\n")).Return(1, nil)
- mockReadCloser := new(MockReadCloser)
- buff := make([]byte, 65535)
- mockReadCloser.On("Read", buff).Return(0, io.ErrClosedPipe, []byte{})
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- _, err := client.doRequest(cmd, param)
- if err == nil {
- t.Error("Test doRequest failed")
- }
-}
-
-func TestRequest(t *testing.T) {
- colName := []string{"lun_group_no", "lun_group_name",
- "previous_affinity_group_no", "next_affinity_group_no",
- }
- data := [][]string{
- []string{"0000", "test", "FFFF", "FFFF"},
- []string{"0001", "test2", "FFFF", "FFFF"},
- []string{"0003", "test3", "FFFF", "FFFF"},
- []string{"0004", "test4", "FFFF", "FFFF"},
- }
- cmd := "show lun-groups"
- resultArray := [][]string{
- []string{"00"},
- []string{"0004"},
- }
- resultArray = append(resultArray, data...)
- client := &EternusClient{
- cliConfPath: "./config/cli_response.yml",
- }
- ret, err := client.parseResult(cmd, resultArray)
- if err != nil {
- t.Error("Test doRequest failed")
- }
- for k, e := range ret {
- for i, v := range colName {
- if data[k][i] != e[v] {
- t.Error("Test doRequest failed")
- }
- }
- }
-}
-
-func TestRequest_Dropdata(t *testing.T) {
- colName := []string{"tpp_number", "tpp_name", "disk_type",
- "nearline", "raid_level", "tpp_status", "total_capacity",
- "used_capacity", "alarm_status", "warning_level_range",
- "attention_level_range", "encryption_status", "dedup",
- "data_reduction_rate", "provisioned_capacity",
- "provisioned_rate", "dedup_status", "chunk_size"}
- cmd := "show thin-pro-pools"
- data := [][]string{
- []string{"0002", "osdstest", "01", "00", "04", "0010",
- "00000000666FC000", "0000000000000000", "01", "5A", "4B",
- "00", "00", "00", "0000000000200000", "00000001", "FF", "01"},
- }
- resultArray := [][]string{
- []string{"00"},
- []string{"00000000"},
- []string{"0001"},
- }
- resultArray = append(resultArray, data...)
- client := &EternusClient{
- cliConfPath: "./config/cli_response.yml",
- }
- ret, err := client.parseResult(cmd, resultArray)
- if err != nil {
- t.Error("Test doRequest failed")
- }
-
- for k, e := range ret {
- for i, v := range colName {
- if data[k][i] != e[v] {
- t.Error("Test doRequest failed")
- }
- }
- }
-}
-
-func TestRequest_Dropmultidata(t *testing.T) {
- colName := []string{"lun", "volume_no", "volume_name",
- "volume_raw_status", "volume_round_status", "volume_size", "uid",
- }
- cmd := "show lun-group"
- data := [][]string{
- []string{"0000", "0019", "170-10-vol0", "A000", "20", "0000000000000000", "60000000000000000000000000000000"},
- []string{"0001", "001A", "170-10-vol1", "A000", "20", "0000000000000000", "60000000000000000000000000000000"},
- }
- resultArray := [][]string{
- []string{"00"},
- []string{"0001"},
- []string{"0001", "170-10", "FFFF", "FFFF"},
- []string{"0002"},
- }
- resultArray = append(resultArray, data...)
- client := &EternusClient{
- cliConfPath: "./config/cli_response.yml",
- }
- ret, err := client.parseResult(cmd, resultArray)
- if err != nil {
- t.Error("Test doRequest failed")
- }
-
- for k, e := range ret {
- for i, v := range colName {
- if data[k][i] != e[v] {
- t.Error("Test doRequest failed")
- }
- }
- }
-}
-
-func TestRequest_ErrorStatus(t *testing.T) {
- cmd := "show lun-group"
- data := [][]string{
- []string{"0000", "0019", "170-10-vol0", "A000", "20", "0000000000000000", "60000000000000000000000000000000"},
- }
- resultArray := [][]string{
- []string{"01"},
- []string{"0001"},
- []string{"0001", "170-10", "FFFF", "FFFF"},
- []string{"0001"},
- }
- resultArray = append(resultArray, data...)
- client := &EternusClient{
- cliConfPath: "./config/cli_response.yml",
- }
- _, err := client.parseResult(cmd, resultArray)
- if err == nil {
- t.Error("Test doRequest failed")
- }
-}
-
-func TestRequest_ErrorNoStatus(t *testing.T) {
- cmd := "show lun-group"
- data := [][]string{
- []string{"0000", "0019", "170-10-vol0", "A000", "20", "0000000000000000", "60000000000000000000000000000000"},
- }
- resultArray := [][]string{
- []string{},
- []string{"0001"},
- []string{"0001", "170-10", "FFFF", "FFFF"},
- []string{"0001"},
- }
- resultArray = append(resultArray, data...)
- client := &EternusClient{
- cliConfPath: "./config/cli_response.yml",
- }
- _, err := client.parseResult(cmd, resultArray)
- if err == nil {
- t.Error("Test doRequest failed")
- }
-}
-
-func TestRequest_ErrorInconsistent(t *testing.T) {
- cmd := "show lun-group"
- data := [][]string{
- []string{"0000", "0019", "170-10-vol0", "A000", "20", "0000000000000000", "60000000000000000000000000000000"},
- }
- resultArray := [][]string{
- []string{},
- []string{"0001"},
- []string{"0001", "170-10", "FFFF", "FFFF"},
- []string{"0002"},
- }
- resultArray = append(resultArray, data...)
- client := &EternusClient{
- cliConfPath: "./config/cli_response.yml",
- }
- _, err := client.parseResult(cmd, resultArray)
- if err == nil {
- t.Error("Test doRequest failed")
- }
-}
-
-func TestGetData(t *testing.T) {
- client := &EternusClient{
- cliConfPath: "./config/cli_response.yml",
- }
- var adatas []interface{}
- e := map[interface{}]interface{}{
- "name": "col1",
- }
- adatas = append(adatas, e)
- e = map[interface{}]interface{}{
- "name": "col2",
- }
- adatas = append(adatas, e)
- e = map[interface{}]interface{}{
- "name": "col3",
- }
- adatas = append(adatas, e)
-
- result := []string{"a", "b", "c"}
- ret, err := client.getData(adatas, result)
- if err != nil {
- t.Error("Test getData failed")
- }
- if ret["col1"] != "a" {
- t.Error("Test getData failed")
- }
- if ret["col2"] != "b" {
- t.Error("Test getData failed")
- }
- if ret["col3"] != "c" {
- t.Error("Test getData failed")
- }
-}
-
-func TestGetData_Error(t *testing.T) {
- client := &EternusClient{
- cliConfPath: "./config/cli_response.yml",
- }
- var adatas []interface{}
- e := map[interface{}]interface{}{
- "name": "col1",
- }
- adatas = append(adatas, e)
- e = map[interface{}]interface{}{
- "name": "col2",
- }
- adatas = append(adatas, e)
-
- result := []string{"a"}
- _, err := client.getData(adatas, result)
- if err == nil {
- t.Error("Test getData failed")
- }
-}
-
-func TestConvStringArray(t *testing.T) {
- client := &EternusClient{
- cliConfPath: "./config/cli_response.yml",
- }
- cmd := "show test_cmd"
- resultString := "\r\nCLI> show test_cmd\r\n00\r\na\tb\tc\r\nCLI> "
- var resultByte bytes.Buffer
- resultByte.WriteString(resultString)
-
- ret := client.convStringArray(cmd, &resultByte)
-
- if len(ret[0]) != 1 || ret[0][0] != "00" {
- t.Error("Test convStringArray failed")
- }
-
- if len(ret[1]) != 3 || ret[1][0] != "a" ||
- ret[1][1] != "b" || ret[1][2] != "c" {
- t.Error("Test convStringArray failed")
- }
-}
-
-func createIOMock(cmd string, output string) *EternusClient {
- mockWriteCloser := new(MockWriteCloser)
- if cmd != "" {
- mockWriteCloser.On("Write", []byte(cmd)).Return(1, nil)
- } else {
- mockWriteCloser.On("Write", mock.Anything).Return(1, nil)
- }
- mockWriteCloser.On("Write", []byte("\n")).Return(1, nil)
- // create stdout mock
- mockReadCloser := new(MockReadCloser)
- buff := make([]byte, 65535)
- out := []byte(output)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out)
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- return client
-}
-
-func TestGetVolume(t *testing.T) {
- execCmd := "show volumes -volume-number 1 \n"
- outStr := "\r\nCLI> show volumes -volume-number 1 \r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "0012\tosds-643e8232-1b\tA000\t09\t00\t0002\tosdstest\t0000000000000000\t00\t00\t00000000\t0050\tFF\t00\tFF\tFF\t20\t00\tFFFF\t00\t60000000000000000000000000000000\t00\t00\tFF\tFF\tFFFFFFFF\t00\t00\tFF\t00\r\n"
- outStr += "CLI> "
- client := createIOMock(execCmd, outStr)
- ret, err := client.GetVolume("1")
- if err != nil {
- t.Error("Test doRequest failed")
- }
- if ret.Id != "18" && ret.Name != "osds-643e8232-1b" &&
- ret.Size != 1 && ret.Status != "00" &&
- ret.PoolName != "tosdstest" && ret.PoolId != "2" {
- t.Error("Test GetVolume failed")
- }
-}
-
-func TestGetVolumeByName(t *testing.T) {
- execCmd := "show volumes -volume-name osds-643e8232-1b \n"
- outStr := "\r\nCLI> show volumes -volume-name osds-643e8232-1b \r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "0012\tosds-643e8232-1b\tA000\t09\t00\t0002\tosdstest\t0000000000000000\t00\t00\t00000000\t0050\tFF\t00\tFF\tFF\t20\t00\tFFFF\t00\t60000000000000000000000000000000\t00\t00\tFF\tFF\tFFFFFFFF\t00\t00\tFF\t00\r\n"
- outStr += "CLI> "
- client := createIOMock(execCmd, outStr)
- ret, err := client.GetVolumeByName("osds-643e8232-1b")
- if err != nil {
- t.Error("Test doRequest failed")
- }
- if ret.Id != "18" && ret.Name != "osds-643e8232-1b" &&
- ret.Size != 1 && ret.Status != "00" &&
- ret.PoolName != "tosdstest" && ret.PoolId != "2" {
- t.Error("Test GetVolume failed")
- }
-}
-
-func TestDeleteIscsiHost(t *testing.T) {
- name := "hostname"
-
- execCmd := "delete host-iscsi-name -host-name " + name + " \n"
- outStr := "\r\nCLI> " + execCmd + " \r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- client := createIOMock(execCmd, outStr)
- err := client.DeleteIscsiHostByName(name)
- if err != nil {
- t.Error("Test DeleteIscsiHostByName failed")
- }
-}
-
-func TestDeleteLunGroup(t *testing.T) {
- name := "hostname"
- execCmd := "delete lun-group -lg-name " + name + " \n"
- outStr := "\r\nCLI> " + execCmd + " \r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- client := createIOMock("", outStr)
- err := client.DeleteLunGroupByName(name)
- if err != nil {
- t.Error("Test DeleteLunGroupByName failed")
- }
-}
-
-func TestDeleteHostAffinity(t *testing.T) {
- port := "010"
- name := "hostname"
- execCmd := "release host-affinity -port " + port
- execCmd += " -host-name " + name + " -mode all" + " \n"
- outStr := "\r\nCLI> " + execCmd + " \r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- client := createIOMock("", outStr)
- err := client.DeleteHostAffinity(port, name)
- if err != nil {
- t.Error("Test DeleteHostAffinity failed")
- }
-}
-
-func TestDeleteFcHost(t *testing.T) {
- name := "hostname"
- execCmd := "delete host-wwn-name -host-name " + name + " \n"
- outStr := "\r\nCLI> " + execCmd + " \r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- client := createIOMock(execCmd, outStr)
- err := client.DeleteFcHostByName(name)
- if err != nil {
- t.Error("Test DeleteFcHost failed")
- }
-}
-
-func TestListMapping(t *testing.T) {
- port := "010"
-
- execCmd := "show mapping -port " + port + " \n"
- outStr := "\r\nCLI> " + execCmd + " \r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "11005100\t00\r\n"
- outStr += "0001\r\n"
- outStr += "0000\t0001\tosds-643e8232-1b\tA000\t20\t0000000000000000\r\n"
- outStr += "CLI> "
- client := createIOMock(execCmd, outStr)
- mapping, err := client.ListMapping(port)
- if err != nil {
- t.Error("Test ListMapping failed")
- }
- for _, v := range mapping {
- if v.Lun != "0" && v.VolumeNumber != "1" &&
- v.VolumeName != "osds-643e8232-1b" && v.VolumeRawStatus != "A000" &&
- v.VolumeRoundStatus != "20" && v.VolumeSize != 1 {
- t.Error("Test ListMapping failed")
- }
- }
-}
-
-func TestCreateSnapshot(t *testing.T) {
- srcLunID := "0"
- destLunID := "1"
- execCmd := "start advanced-copy -source-volume-number " + srcLunID + " -destination-volume-number " + destLunID + " \n"
- outStr := "\r\nCLI> " + execCmd + " \r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- client := createIOMock("", outStr)
- err := client.CreateSnapshot(srcLunID, destLunID)
- if err != nil {
- t.Error("Test DeleteMapping failed")
- }
-}
-
-func TestListSnapshot(t *testing.T) {
- execCmd := "show advanced-copy-session -type sopc+ \n"
- outStr := "\r\nCLI> " + execCmd + " \r\n"
- outStr += "SID Gene- Type Volume Source Volume Destination Volume Status Phase Error Requestor\r\n"
- outStr += " ration Type No. Name No. Name Code\r\n"
- outStr += "----- ------- -------- --------- ----- -------------------------------- ----- -------------------------------- ------------- ---------------- ------ ---------\r\n"
- outStr += " 50 1/ 1 SnapOPC+ Standard 2 testvol0 10 testvol1 Active Copying 0x00 GUI\r\n"
- outStr += "CLI> "
- client := createIOMock("", outStr)
- snapshots, err := client.ListSnapshot()
- if err != nil {
- t.Error("Test ListSnapshot failed")
- }
- for _, v := range snapshots {
- if v.Sid != "50" && v.Gen != "1/" &&
- v.GenTotal != "1" && v.Type != "SnapOPC+" &&
- v.VolumeType != "Standard" && v.SrcNo != "2" &&
- v.SrcName != "testvol0" && v.DestNo != "10" &&
- v.DestName != "testvol1" && v.Status != "Active" &&
- v.Phase != "Copying" && v.ErrorCode != "0x00" &&
- v.Requestor != "GUI" {
- t.Error("Test ListSnapshot failed")
- }
- }
-}
-
-func TestDeleteSnapshot(t *testing.T) {
- sid := "0!"
- execCmd := "stop advanced-copy -session-id " + sid + " \n"
- outStr := "\r\nCLI> " + execCmd + " \r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- client := createIOMock("", outStr)
- err := client.DeleteSnapshot(sid)
- if err != nil {
- t.Error("Test DeleteSnapshot failed")
- }
-}
-
-func TestDeleteSnapshot_AlreadyDelete(t *testing.T) {
- sid := "0!"
- execCmd := "stop advanced-copy -session-id " + sid + " \n"
- outStr := "\r\nCLI> " + execCmd + " \r\n"
- outStr += "Error: E0110 Resource does not exist.\r\n"
- outStr += "CLI> "
- client := createIOMock("", outStr)
- err := client.DeleteSnapshot(sid)
- if err != nil {
- t.Error("Test DeleteSnapshot failed")
- }
-}
-
-func TestDeleteSnapshot_Error(t *testing.T) {
- sid := "0"
- execCmd := "stop advanced-copy -session-id " + sid + " \n"
- outStr := "\r\nCLI> " + execCmd + " \r\n"
- outStr += "Error: E0112 XXXXXXXXXXXXX.\r\n"
- outStr += "CLI> "
- client := createIOMock(execCmd, outStr)
- err := client.DeleteSnapshot(sid)
- if err == nil {
- t.Error("Test DeleteSnapshot failed")
- }
-}
diff --git a/contrib/drivers/fujitsu/eternus/common.go b/contrib/drivers/fujitsu/eternus/common.go
deleted file mode 100644
index 49da66c2d..000000000
--- a/contrib/drivers/fujitsu/eternus/common.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package eternus
-
-import (
- "hash/fnv"
- "net"
- "strconv"
- "strings"
-
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
-)
-
-type AuthOptions struct {
- Username string `yaml:"username,omitempty"`
- Password string `yaml:"password,omitempty"`
- AdminUsername string `yaml:"adminUsername,omitempty"`
- AdminPassword string `yaml:"adminPassword,omitempty"`
- PwdEncrypter string `yaml:"PwdEncrypter,omitempty"`
- EnableEncrypted bool `yaml:"EnableEncrypted,omitempty"`
- Endpoint string `yaml:"endpoint,omitempty"`
- Insecure bool `yaml:"insecure,omitempty"`
-}
-
-type Replication struct {
- RemoteAuthOpt AuthOptions `yaml:"remoteAuthOptions"`
-}
-
-type EternusConfig struct {
- AuthOptions `yaml:"authOptions"`
- Replication `yaml:"replication"`
- Pool map[string]PoolProperties `yaml:"pool,flow"`
- TargetIp string `yaml:"targetIp,omitempty"`
- CeSupport bool `yaml:"ceSupport,omitempty"`
-}
-
-func IsIPv4(ip string) bool {
- return true
-}
-
-// GetPortNumber
-func GetPortNumber(caModuleId string, portNumber string) string {
- caInt, _ := strconv.ParseInt(caModuleId[0:1], 16, 64)
- cmInt, _ := strconv.ParseInt(caModuleId[1:2], 16, 64)
- portInt, _ := strconv.ParseInt(portNumber[1:2], 16, 64)
- cm := cmInt % 8
- ca := caInt % 4
- port := (caInt/4/2)*2 + portInt%2
- ret := strconv.FormatInt(cm, 16) + strconv.FormatInt(ca, 16) + strconv.FormatInt(port, 16)
- return ret
-}
-
-// GetPortNumberV2
-func GetPortNumberV2(caModuleId string, portNumber string) string {
- var base int64 = 4
- caInt, _ := strconv.ParseInt(caModuleId[0:1], 16, 64)
- cmInt, _ := strconv.ParseInt(caModuleId[1:2], 16, 64)
- portInt, _ := strconv.ParseInt(portNumber[1:2], 16, 64)
- ce := ((caInt % 8) / 2) + (((cmInt - base) / 4) * 4)
- cm := cmInt % 8
- ca := caInt % 4
- port := (caInt/4/2)*2 + portInt%2
- ret := strconv.FormatInt(ce, 16) + strconv.FormatInt(cm, 16) + strconv.FormatInt(ca, 16) + strconv.FormatInt(port, 16)
- return ret
-}
-
-// ParseIPv4 : convert hex string to ip address
-func ParseIPv4(ip string) string {
- ipStr := []string{}
- for i := 0; i < len(ip); i += 2 {
- tmpIP, _ := strconv.ParseInt(ip[i:i+2], 16, 64)
- ipStr = append(ipStr, strconv.FormatInt(tmpIP, 10))
- }
- return strings.Join(ipStr, ".")
-}
-
-// ParseIPv6 : convert hex string to ip address
-func ParseIPv6(ip string) string {
- ipStr := ip[:4] + ":" + ip[4:8] + ":" + ip[8:12] + ":" + ip[12:16] + ":"
- ipStr += ip[16:20] + ":" + ip[20:24] + ":" + ip[24:28] + ":" + ip[28:32]
- return ipStr
-}
-
-// EqualIP : if ip address is eauql return ture
-func EqualIP(ip1 string, ip2 string) bool {
- return net.ParseIP(ip1).Equal(net.ParseIP(ip2))
-}
-
-// GetFnvHash :
-func GetFnvHash(str string) string {
- hash := fnv.New64()
- hash.Write([]byte(str))
- val := hash.Sum64()
- return strconv.FormatUint(uint64(val), 16)
-}
diff --git a/contrib/drivers/fujitsu/eternus/common_test.go b/contrib/drivers/fujitsu/eternus/common_test.go
deleted file mode 100644
index d30f68d8d..000000000
--- a/contrib/drivers/fujitsu/eternus/common_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package eternus
-
-import (
- "testing"
-)
-
-func TestIsIPv4(t *testing.T) {
- ip := "192.168.1.1"
- expect := true
- result := IsIPv4(ip)
- if result != expect {
- t.Error("Test IsIPv4 failed")
- }
-}
-
-func TestGetPortNumber(t *testing.T) {
- caModuleID := "50"
- portNumber := "00"
- expect := "010"
- result := GetPortNumber(caModuleID, portNumber)
- if result != expect {
- t.Error("Test GetPortNumber failed")
- }
-}
-
-func TestGetPortNumberV2(t *testing.T) {
- caModuleID := "11"
- portNumber := "00"
- expect := "0110"
- result := GetPortNumberV2(caModuleID, portNumber)
- if result != expect {
- t.Error("Test GetPortNumberV2 failed")
- }
-}
-
-func TestParseIPv4(t *testing.T) {
- ip := "C0A80CE3"
- expect := "192.168.12.227"
- result := ParseIPv4(ip)
- if result != expect {
- t.Error("Test ParseIPv4 failed")
- }
-}
-
-func TestParseIPv6(t *testing.T) {
- ip := "FE800000000000001111222233334444"
- expect := "FE80:0000:0000:0000:1111:2222:3333:4444"
- result := ParseIPv6(ip)
- if result != expect {
- t.Error("Test ParseIPv6 failed")
- }
-}
-
-func TestEqualIP(t *testing.T) {
- ip := "FE80::1111:2222:3333:4444"
- ip2 := "FE80:0000:0000:0000:1111:2222:3333:4444"
- expect := true
- result := EqualIP(ip, ip2)
- if result != expect {
- t.Error("Test EqualIP failed")
- }
-
- ip = "FE80::1111:2222:3333:4444"
- ip2 = "FE80:0000:0000:0000:1111:2222:3333:5555"
- expect = false
- result = EqualIP(ip, ip2)
- if result != expect {
- t.Error("Test EqualIP failed")
- }
-}
-
-func TestGetFnvHash(t *testing.T) {
- str := "teststring"
- expect := "94dd691390fdc1da"
- result := GetFnvHash(str)
- if result != expect {
- t.Error("Test GetFnvHash failed")
- }
-}
diff --git a/contrib/drivers/fujitsu/eternus/config/cli_response.yml b/contrib/drivers/fujitsu/eternus/config/cli_response.yml
deleted file mode 100644
index 1d68bc6fd..000000000
--- a/contrib/drivers/fujitsu/eternus/config/cli_response.yml
+++ /dev/null
@@ -1,497 +0,0 @@
-cli:
- show_thin-pro-pools:
- - type: status
- data:
- - name: status
- byte: 1
- - type: data
- drop: true
- data:
- - name: available_logical_capacity
- byte: 4
- - type: count
- data:
- - name: tpp_count
- byte: 2
- - type: multiple_data
- data:
- - name: tpp_number
- byte: 2
- - name: tpp_name
- byte: 16
- - name: disk_type
- byte: 1
- - name: nearline
- byte: 1
- - name: raid_level
- byte: 1
- - name: tpp_status
- byte: 2
- - name: total_capacity
- byte: 8
- - name: used_capacity
- byte: 8
- - name: alarm_status
- byte: 1
- - name: warning_level_range
- byte: 1
- - name: attention_level_range
- byte: 1
- - name: encryption_status
- byte: 1
- - name: dedup
- byte: 1
- - name: data_reduction_rate
- byte: 1
- - name: provisioned_capacity
- byte: 8
- - name: provisioned_rate
- byte: 4
- - name: dedup_status
- byte: 1
- - name: chunk_size
- byte: 1
- show_volumes:
- - type: status
- data:
- - name: status
- byte: 1
- - type: count
- data:
- - name: volume_count
- byte: 2
- - type: multiple_data
- data:
- - name: volume_number
- byte: 2
- - name: volume_name
- byte: 32
- - name: volume_status
- byte: 2
- - name: volume_type
- byte: 1
- - name: encryption
- byte: 1
- - name: pool_number # RAID Group/External RAID Group/TPP/FTRP number
- byte: 2
- - name: pool_name # RAID Group/External RAID Group/TPP/FTRP name
- byte: 16
- - name: size
- byte: 8
- - name: copy_protection
- byte: 1
- - name: alarm # TPV/FTV Alarm
- byte: 1
- - name: used_capacity # TPV/FTV Used Capacity
- byte: 8
- - name: threshold # TPV/FTV Threshold
- byte: 2
- - name: sdpv_deletion_flag
- byte: 1
- - name: sdpv_resolution
- byte: 1
- - name: sdp_id
- byte: 1
- - name: domain_number
- byte: 1
- - name: dummy_volume
- byte: 1
- - name: balancing_level
- byte: 1
- # omitted other definitions. above definitions are enough for osds.
- create_volume:
- - type: status
- data:
- - name: status
- byte: 1
- - type: count
- data:
- - name: volume_count
- byte: 2
- - type: multiple_data
- data:
- - name: volume_number
- byte: 2
- delete_volume:
- - type: status
- data:
- - name: status
- byte: 1
- expand_volume:
- - type: status
- data:
- - name: status
- byte: 1
- show_host-iscsi-names:
- - type: status
- data:
- - name: status
- byte: 1
- - type: count
- data:
- - name: host_iscsi_count
- byte: 2
- - type: multiple_data
- data:
- - name: host_iscsi_number
- byte: 2
- - name: host_iscsi_nickname
- byte: 16
- - name: host_response_number
- byte: 2
- - name: host_response_name
- byte: 16
- - name: host_iscsi_ip_address
- byte: 16
- - name: host_iscsi_name
- byte: 223
- - name: ip_version
- byte: 1
- show_host-wwn-names:
- - type: status
- data:
- - name: status
- byte: 1
- - type: count
- data:
- - name: host_fc_count
- byte: 2
- - type: multiple_data
- data:
- - name: host_wwn_no
- byte: 2
- - name: host_wwn_alias_name
- byte: 16
- - name: host_wwn_name
- byte: 8
- - name: host_response_no
- byte: 2
- - name: host_response_name
- byte: 16
- create_host-iscsi-name:
- - type: status
- data:
- - name: status
- byte: 1
- - type: data
- data:
- - name: host_number
- byte: 2
- - type: count
- data:
- - name: host_group_count
- byte: 2
- - type: multiple_data
- data:
- - name: host_group_number
- byte: 2
- delete_host-iscsi-name:
- - type: status
- data:
- - name: status
- byte: 1
- create_host-wwn-name:
- - type: status
- data:
- - name: status
- byte: 1
- - type: data
- data:
- - name: host_number
- byte: 2
- - type: count
- data:
- - name: host_group_count
- byte: 2
- - type: multiple_data
- data:
- - name: host_group_number
- byte: 2
- delete_host-wwn-name:
- - type: status
- data:
- - name: status
- byte: 1
- create_lun-group:
- - type: status
- data:
- - name: status
- byte: 1
- - type: data
- data:
- - name: lun_group_number
- byte: 2
- delete_lun-group:
- - type: status
- data:
- - name: status
- byte: 1
- set_lun-group:
- - type: status
- data:
- - name: status
- byte: 1
- - type: count
- data:
- - name: lun_group_count
- byte: 2
- - type: multiple_data
- data:
- - name: lun
- byte: 2
- show_iscsi-parameters:
- - type: status
- data:
- - name: status
- byte: 1
- - type: count
- data:
- - name: iscsi-ca_port_count
- byte: 1
- - type: multiple_data
- data:
- - name: ca_module_id
- byte: 1
- - name: port_number
- byte: 1
- - name: port_mode
- byte: 1
- - name: host_affinity
- byte: 1
- - name: iscsi_name
- byte: 224
- - name: alias_name
- byte: 31
- - name: host_response_no
- byte: 1
- - name: host_response_name
- byte: 16
- - name: reset_scope
- byte: 1
- - name: reserve_cancel
- byte: 1
- - name: ip_address
- byte: 16
- - name: subnet_mask
- byte: 16
- - name: gateway_address
- byte: 16
- - name: mac_address
- byte: 6
- - name: tcp_port_number
- byte: 2
- - name: tcp_window_size
- byte: 1
- - name: iscsi_flag
- byte: 4
- - name: iscsi_option_flag
- byte: 2
- - name: isns_server_ip_address
- byte: 40
- - name: isns_server_port_number
- byte: 2
- - name: chap_user_name
- byte: 256
- - name: header_digest
- byte: 1
- - name: data_digest
- byte: 1
- - name: transfer_rate
- byte: 1
- - name: link_status
- byte: 2
- - name: cmdsn_count
- byte: 1
- - name: vlan_id
- byte: 2
- - name: mtu_size
- byte: 2
- - name: bandwidth_limit
- byte: 2
- - name: chap_user_name_ra
- byte: 64
- - name: link_local_ip_address
- byte: 40
- - name: connect_ip_address
- byte: 40
- - name: ipv6_gateway_address
- byte: 40
- - name: rec_line_no.
- byte: 1
- - name: rec_transfer_mode
- byte: 1
- - name: ip_flag
- byte: 4
- - name: session_kind
- byte: 1
- - name: tfo_pair_status
- byte: 1
- show_fc-parameters:
- - type: status
- data:
- - name: status
- byte: 1
- - type: count
- data:
- - name: fc-ca_port_count
- byte: 1
- - type: multiple_data
- data:
- - name: ca_module_id
- byte:
- - name: port_number
- byte:
- - name: port_mode
- byte:
- - name: connection
- byte:
- - name: loop_id_assign
- byte:
- - name: loop_id
- byte:
- - name: transfer_rate
- byte:
- - name: frame_size
- byte:
- - name: host_affinity
- byte:
- - name: host_response_no
- byte:
- - name: host_response_name
- byte:
- - name: reset_scope
- byte:
- - name: reserve_cancel
- byte:
- - name: rec_line_no
- byte:
- - name: rec_transfer_mode
- byte:
- - name: session_kind
- byte:
- - name: wwn_mode
- byte:
- - name: wwpn
- byte:
- - name: tfo_wwpn
- byte:
- - name: tfo_pair_status
- byte:
- set_host-affinity:
- - type: status
- data:
- - name: status
- byte: 1
- - type: data
- data:
- - name: lun_mask_group_no
- byte: 2
- release_host-affinity:
- - type: status
- data:
- - name: status
- byte: 1
- show_lun-group:
- - type: status
- data:
- - name: status
- byte: 1
- - type: count
- if_zero_skip_all: true
- data:
- - name: lun_group_count
- byte: 2
- - type: multiple_data
- drop: true
- data:
- - name: lun_group_no
- byte: 2
- - name: lun_group_name
- byte: 16
- - name: previous_affinity_group_no
- byte: 2
- - name: next_affinity_group_no
- byte: 2
- - type: count
- data:
- - name: lun_count
- byte: 2
- - type: multiple_data
- data:
- - name: lun
- byte: 2
- - name: volume_no
- byte: 2
- - name: volume_name
- byte: 16
- - name: volume_raw_status
- byte: 2
- - name: volume_round_status
- byte: 1
- - name: volume_size
- byte: 8
- - name: uid
- byte: 16
- show_lun-groups:
- - type: status
- data:
- - name: status
- byte: 1
- - type: count
- data:
- - name: lun_group_count
- byte: 2
- - type: multiple_data
- data:
- - name: lun_group_no
- byte: 2
- - name: lun_group_name
- byte: 16
- - name: previous_affinity_group_no
- byte: 2
- - name: next_affinity_group_no
- byte: 2
- show_mapping:
- - type: status
- data:
- - name: status
- byte: 1
- - type: count
- data:
- - name: mapping_port_count
- byte: 2
- - type: multiple_data
- drop: true
- data:
- - name: mapping_port
- byte: 2
- - name: affinity_mode
- byte: 2
- - type: count
- data:
- - name: lun_count
- byte: 2
- - type: multiple_data
- data:
- - name: lun
- byte: 2
- - name: volume_number
- byte: 2
- - name: volume_name
- byte: 2
- - name: volume_raw_status
- byte: 2
- - name: volume_round_status
- byte: 2
- - name: volume_size
- byte: 2
- set_mapping:
- - type: status
- data:
- - name: status
- byte: 1
- release_mapping:
- - type: status
- data:
- - name: status
- byte: 1
diff --git a/contrib/drivers/fujitsu/eternus/constants.go b/contrib/drivers/fujitsu/eternus/constants.go
deleted file mode 100644
index a0380d5a4..000000000
--- a/contrib/drivers/fujitsu/eternus/constants.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package eternus
-
-// define config parameter
-const (
- defaultConfPath = "/etc/opensds/driver/fujitsu_eternus.yaml"
- defaultCliConfPath = "/etc/opensds/driver/cli_response.yml"
- defaultAZ = "default"
- SSHPort = "22"
-)
-
-// define eternus specific parameter
-const (
- LBASize = 2097152
-)
-
-// define command
-const (
- CreateVolume = "create volume"
-)
-
-// define error code
-const (
- NotFound = "0110"
-)
-
-// response key name
-const (
- KLunId = "eternusVolId"
- KLunGrpId = "eternusLunGrpId"
- KLunMaskGrpNo = "eternusLunMaskGrpNo"
- KHostId = "eternusHostId"
- KSnapId = "eternusSnapshotId"
- KSnapLunId = "eternusSnapshotVolId"
-)
diff --git a/contrib/drivers/fujitsu/eternus/eternus.go b/contrib/drivers/fujitsu/eternus/eternus.go
deleted file mode 100644
index f0b968489..000000000
--- a/contrib/drivers/fujitsu/eternus/eternus.go
+++ /dev/null
@@ -1,635 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package eternus
-
-import (
- "errors"
- "fmt"
- "os"
- "strconv"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/contrib/drivers/utils"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
-
- "github.com/opensds/opensds/pkg/utils/config"
- uuid "github.com/satori/go.uuid"
-)
-
-// Driver
-type Driver struct {
- conf *EternusConfig
- client *EternusClient
-}
-
-// Setup eternus driver
-func (d *Driver) Setup() (err error) {
- // Read fujitsu eternus config file
- conf := &EternusConfig{}
- d.conf = conf
- path := config.CONF.OsdsDock.Backends.FujitsuEternus.ConfigPath
-
- if "" == path {
- path = defaultConfPath
- }
- Parse(conf, path)
- d.client, err = NewClient(&d.conf.AuthOptions)
- if err != nil {
- log.Errorf("failed to get new client, %v", err)
- return err
- }
- err = d.client.login()
- if err != nil {
- log.Errorf("failed to login, %v", err)
- return err
- }
- return nil
-}
-
-// Unset eternus driver
-func (d *Driver) Unset() error {
- d.client.Destroy()
- return nil
-}
-
-// ListPools : get pool list
-func (d *Driver) ListPools() ([]*model.StoragePoolSpec, error) {
- var pols []*model.StoragePoolSpec
- sp, err := d.client.ListStoragePools()
- if err != nil {
- return nil, err
- }
- for _, p := range sp {
- c := d.conf
- if _, ok := c.Pool[p.Name]; !ok {
- continue
- }
-
- host, _ := os.Hostname()
- name := fmt.Sprintf("%s:%s:%s", host, d.conf.Endpoint, p.Id)
- pol := &model.StoragePoolSpec{
- BaseModel: &model.BaseModel{
- Id: uuid.NewV5(uuid.NamespaceOID, name).String(),
- },
- Name: p.Name,
- TotalCapacity: p.TotalCapacity,
- FreeCapacity: p.FreeCapacity,
- StorageType: c.Pool[p.Name].StorageType,
- Extras: c.Pool[p.Name].Extras,
- AvailabilityZone: c.Pool[p.Name].AvailabilityZone,
- }
- if pol.AvailabilityZone == "" {
- pol.AvailabilityZone = defaultAZ
- }
- pols = append(pols, pol)
- }
- return pols, nil
-}
-
-func (d *Driver) createVolumeFromSnapshot(opt *pb.CreateVolumeOpts) (*model.VolumeSpec, error) {
- return nil, &model.NotImplementError{"method createVolumeFromSnapshot is not implement."}
-}
-
-// CreateVolume : create volume.
-func (d *Driver) CreateVolume(opt *pb.CreateVolumeOpts) (*model.VolumeSpec, error) {
- log.Infof("start creating volume. opt = %v", opt)
- if opt.GetSnapshotId() != "" {
- return d.createVolumeFromSnapshot(opt)
- }
-
- id := opt.GetId()
- desc := opt.GetDescription()
- provPolicy := d.conf.Pool[opt.GetPoolName()].Extras.DataStorage.ProvisioningPolicy
- // execute create volume
- vol, err := d.client.CreateVolume(id, opt.GetSize(), desc, opt.GetPoolName(), provPolicy)
- if err != nil {
- log.Error("create Volume Failed:", err)
- return nil, err
- }
- log.Infof("create volume %s (%s) success.", opt.GetName(), vol.Id)
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- AvailabilityZone: opt.GetAvailabilityZone(),
- Metadata: map[string]string{
- KLunId: vol.Id,
- },
- }, nil
-}
-
-// PullVolume : get volume information
-func (d *Driver) PullVolume(id string) (*model.VolumeSpec, error) {
- return nil, &model.NotImplementError{"method PullVolume is not implement."}
-}
-
-// DeleteVolume : delete volume
-func (d *Driver) DeleteVolume(opt *pb.DeleteVolumeOpts) error {
- log.Infof("start delete volume. opt = %v", opt)
- volID := opt.GetMetadata()[KLunId]
- err := d.client.DeleteVolume(volID)
- if err != nil {
- log.Error("remove Volume Failed:", err)
- return err
- }
- log.Infof("delete volume (%s) success.", opt.GetId())
- return nil
-}
-
-// ExtendVolume : extend volume
-func (d *Driver) ExtendVolume(opt *pb.ExtendVolumeOpts) (*model.VolumeSpec, error) {
- log.Infof("start extend volume. opt = %v", opt)
-
- volID := opt.GetMetadata()[KLunId]
- // execute extend volume
- err := d.client.ExtendVolume(volID, opt.GetSize())
- if err != nil {
- log.Error("extend Volume Failed:", err)
- return nil, err
- }
-
- log.Infof("extend volume %s (%s) success.", opt.GetName(), volID)
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- AvailabilityZone: opt.GetAvailabilityZone(),
- }, nil
-}
-
-// InitializeConnection :
-func (d *Driver) InitializeConnection(opt *pb.CreateVolumeAttachmentOpts) (*model.ConnectionInfo, error) {
- if opt.GetAccessProtocol() == ISCSIProtocol {
- return d.initializeConnectionIscsi(opt)
- }
- if opt.GetAccessProtocol() == FCProtocol {
- return d.initializeConnectionFC(opt)
- }
- return nil, errors.New("no supported protocol for eternus driver")
-}
-
-func (d *Driver) initializeConnectionIscsi(opt *pb.CreateVolumeAttachmentOpts) (*model.ConnectionInfo, error) {
-
- var err error
- lunID := opt.GetMetadata()[KLunId]
- hostInfo := opt.GetHostInfo()
- hostID := ""
- lunGrpID := ""
- hostLun := ""
- hostExist := false
-
- // check initiator is specified
- initiator := utils.GetInitiatorName(hostInfo.GetInitiators(), opt.GetAccessProtocol())
- needHostAffinity := true
- if initiator == "" {
- needHostAffinity = false
- }
-
- // Get port info
- iscsiPortInfo, err := d.client.GetIscsiPortInfo(d.conf.CeSupport, needHostAffinity)
- if err != nil {
- log.Errorf("get iscsi port failed. error: %v", err)
- return nil, err
- }
-
- // Create host if not exist.
- if needHostAffinity {
- // Create resource name
- initiator := utils.GetInitiatorName(hostInfo.GetInitiators(), opt.GetAccessProtocol())
- ipAddr := hostInfo.GetIp()
- rscName := GetFnvHash(initiator + ipAddr)
- hostID, hostExist, err = d.client.AddIscsiHostWithCheck(rscName, initiator, ipAddr)
- if err != nil {
- log.Errorf("failed to add host, hostInfo =%v, error: %v", hostInfo, err)
- return nil, err
- }
-
- // Create Lun group
- lunGrpID, err = d.client.AddLunGroupWithCheck(rscName, lunID)
- if err != nil {
- log.Errorf("failed to add lun group, lun group name =%s, error: %v", rscName, err)
- return nil, err
- }
- // skip AddHostAffinity if host already exists.
- if !hostExist {
- // Create host affinity
- _, err = d.client.AddHostAffinity(lunGrpID, hostID, iscsiPortInfo.PortNumber)
- if err != nil {
- log.Errorf("failed to add host affinity, lunGrp id=%s, hostID=%s, error: %v",
- lunGrpID, hostID, err)
- return nil, err
- }
- }
- hostLun, err = d.client.GetHostLunID(lunGrpID, lunID)
- if err != nil {
- log.Error("failed to get the host lun id,", err)
- return nil, err
- }
- } else {
- hostLun, err = d.addMapping(iscsiPortInfo.PortNumber, lunID)
- if err != nil {
- return nil, err
- }
- }
-
- log.Infof("initialize iscsi connection (%s) success.", opt.GetId())
- targetLun, _ := strconv.Atoi(hostLun)
- connInfo := &model.ConnectionInfo{
- DriverVolumeType: ISCSIProtocol,
- ConnectionData: map[string]interface{}{
- "targetDiscovered": true,
- "targetIQN": []string{iscsiPortInfo.IscsiName},
- "targetPortal": []string{iscsiPortInfo.Ip + ":" + strconv.Itoa(iscsiPortInfo.TcpPort)},
- "discard": false,
- "targetLun": targetLun,
- },
- }
- return connInfo, nil
-}
-
-func (d *Driver) initializeConnectionFC(opt *pb.CreateVolumeAttachmentOpts) (*model.ConnectionInfo, error) {
-
- var err error
- lunID := opt.GetMetadata()[KLunId]
- hostInfo := opt.GetHostInfo()
- hostID := ""
- lunGrpID := ""
- hostLun := ""
- hostExist := false
-
- // check initiator is specified
- initiator := utils.GetInitiatorName(hostInfo.GetInitiators(), opt.GetAccessProtocol())
- needHostAffinity := true
- if initiator == "" {
- needHostAffinity = false
- }
-
- // Get port info
- fcPortInfo, err := d.client.GetFcPortInfo(d.conf.CeSupport, needHostAffinity)
- if err != nil {
- log.Errorf("failed to get fc port. error: %v", err)
- return nil, err
- }
-
- // initiator is specified
- if needHostAffinity {
- wwnName := utils.GetInitiatorName(hostInfo.GetInitiators(), opt.GetAccessProtocol())
- rscName := GetFnvHash(wwnName)
- // Create host if not exist.
- hostID, hostExist, err = d.client.AddFcHostWithCheck(rscName, wwnName)
- if err != nil {
- log.Errorf("failed to add host, host name =%s, error: %v", hostInfo.Host, err)
- return nil, err
- }
- // Create Lun group
- lunGrpID, err = d.client.AddLunGroupWithCheck(rscName, lunID)
- if err != nil {
- log.Errorf("failed to add lun group, lun group name =%s, error: %v", hostInfo.Host, err)
- return nil, err
- }
- // skip AddHostAffinity if host already exists.
- if !hostExist {
- // Create host affinity
- _, err = d.client.AddHostAffinity(lunGrpID, hostID, fcPortInfo.PortNumber)
- if err != nil {
- log.Errorf("failed to add host affinity, lunGrp id=%s, hostId=%s, error: %v",
- lunGrpID, hostID, err)
- return nil, err
- }
- }
- hostLun, err = d.client.GetHostLunID(lunGrpID, lunID)
- if err != nil {
- log.Error("failed to get the host lun id,", err)
- return nil, err
- }
- } else {
- hostLun, err = d.addMapping(fcPortInfo.PortNumber, lunID)
- if err != nil {
- return nil, err
- }
- }
-
- log.Infof("initialize fc connection (%s) success.", opt.GetId())
-
- targetLun, _ := strconv.Atoi(hostLun)
- fcInfo := &model.ConnectionInfo{
- DriverVolumeType: FCProtocol,
- ConnectionData: map[string]interface{}{
- "targetDiscovered": true,
- "targetWWNs": []string{fcPortInfo.Wwpn},
- "hostName": opt.GetHostInfo().Host,
- "targetLun": targetLun,
- },
- }
- return fcInfo, nil
-}
-
-func (d *Driver) addMapping(PortNumber string, lunID string) (string, error) {
- hostLunID := "0"
- // get exist mapping
- mappings, err := d.client.ListMapping(PortNumber)
- if err != nil {
- log.Error("failed to get mapping,", err)
- return "", err
- }
- // get unused host lun id
- if len(mappings) >= 1024 {
- msg := "reached the upper limit to add mapping"
- log.Error("failed to get host lun id,", msg)
- return "", errors.New(msg)
- }
- for i, v := range mappings {
- if v.Lun != strconv.Itoa(i) {
- hostLunID = strconv.Itoa(i)
- break
- }
- hostLunID = strconv.Itoa(i + 1)
- }
- // add mapping
- err = d.client.AddMapping(lunID, hostLunID, PortNumber)
- if err != nil {
- log.Error("failed to add mapping,", err)
- return "", err
- }
- return hostLunID, nil
-}
-
-func (d *Driver) deleteMapping(PortNumber string, lunID string) error {
- hostLunID := ""
- // get exist mapping
- mappings, err := d.client.ListMapping(PortNumber)
- if err != nil {
- log.Error("failed to get mapping,", err)
- return err
- }
- for _, v := range mappings {
- if v.VolumeNumber == lunID {
- hostLunID = v.Lun
- break
- }
- }
- if hostLunID == "" {
- log.Infof("specified mapping already deleted, PortNumber = %s, lunID =%s", PortNumber, lunID)
- return nil
- }
- // add mapping
- err = d.client.DeleteMapping(hostLunID, PortNumber)
- if err != nil {
- log.Error("failed to delete mapping,", err)
- return err
- }
- return nil
-}
-
-// TerminateConnection :
-func (d *Driver) TerminateConnection(opt *pb.DeleteVolumeAttachmentOpts) error {
-
- if opt.GetAccessProtocol() != ISCSIProtocol &&
- opt.GetAccessProtocol() != FCProtocol {
- return errors.New("no supported protocol for eternus driver")
- }
-
- lunID := opt.GetMetadata()[KLunId]
- hostInfo := opt.GetHostInfo()
- initiator := utils.GetInitiatorName(hostInfo.GetInitiators(), opt.GetAccessProtocol())
- needHostAffinity := true
- if initiator == "" {
- needHostAffinity = false
- }
-
- // Get port info
- portNumber := ""
- if opt.GetAccessProtocol() == ISCSIProtocol {
- iscsiPortInfo, err := d.client.GetIscsiPortInfo(d.conf.CeSupport, needHostAffinity)
- if err != nil {
- log.Errorf("failed to get iscsi port. error: %v", err)
- return err
- }
- portNumber = iscsiPortInfo.PortNumber
- } else if opt.GetAccessProtocol() == FCProtocol {
- fcPortInfo, err := d.client.GetFcPortInfo(d.conf.CeSupport, needHostAffinity)
- if err != nil {
- log.Errorf("failed to get fc port. error: %v", err)
- return err
- }
- portNumber = fcPortInfo.PortNumber
- }
-
- // if no need to delete host affinity, delete mapping
- if needHostAffinity != true {
- err := d.deleteMapping(portNumber, lunID)
- if err != nil {
- log.Errorf("failed to delete mapping. error: %v", err)
- return err
- }
- log.Infof("terminate connection (%s) success.", opt.GetId())
- return nil
- }
-
- // Create resource name
- rscName := ""
- if opt.GetAccessProtocol() == ISCSIProtocol {
- ipAddr := hostInfo.GetIp()
- rscName = GetFnvHash(initiator + ipAddr)
- } else {
- rscName = GetFnvHash(initiator)
- }
-
- // Get lun group
- lg, err := d.client.GetLunGroupByName(rscName)
- if err != nil {
- log.Errorf("failed to get lun group, error: %v", err)
- return err
- }
- // if lun group has some volumes.
- if len(lg.Volumes) > 1 {
- hostLunID := ""
- for _, v := range lg.Volumes {
- if v.Id == lunID {
- hostLunID = v.Lun
- break
- }
- }
- if hostLunID == "" {
- log.Errorf("target volume already removed from lun group, lunID: %s", lunID)
- } else {
- err = d.client.RemoveVolumeFromLunGroup(hostLunID, rscName)
- if err != nil {
- log.Errorf("failed to remove volume from lun group, error: %v", err)
- return err
- }
- }
- } else {
- // Delete host affinity
- err = d.client.DeleteHostAffinity(portNumber, rscName)
- if err != nil {
- log.Errorf("failed to delete host affinity, error: %v", err)
- return err
- }
-
- // Delete lun group
- err = d.client.DeleteLunGroupByName(rscName)
- if err != nil {
- log.Errorf("failed to delete lun group, error: %v", err)
- return err
- }
-
- // Delete host
- if opt.GetAccessProtocol() == ISCSIProtocol {
- err = d.client.DeleteIscsiHostByName(rscName)
- } else if opt.GetAccessProtocol() == FCProtocol {
- err = d.client.DeleteFcHostByName(rscName)
- }
- if err != nil {
- log.Errorf("failed to delete host, error: %v", err)
- return err
- }
- }
- log.Infof("terminate connection (%s) success.", opt.GetId())
- return nil
-}
-
-// CreateSnapshot :
-func (d *Driver) CreateSnapshot(opt *pb.CreateVolumeSnapshotOpts) (*model.VolumeSnapshotSpec, error) {
- lunID := opt.GetMetadata()[KLunId]
- // get source volume information for getting pool information
- vol, err := d.client.GetVolume(lunID)
- if err != nil {
- log.Errorf("failed to get volume, error: %v", err)
- return nil, err
- }
- poolName := vol.PoolName
-
- // create snapshot volume
- provPolicy := d.conf.Pool[poolName].Extras.DataStorage.ProvisioningPolicy
- vol, err = d.client.CreateVolume(opt.GetId(), opt.GetSize(), opt.GetDescription(),
- poolName, provPolicy)
- if err != nil {
- log.Errorf("failed to create snapshot volume, error: %v", err)
- return nil, err
- }
-
- // get Client for admin role
- adminClient, err := NewClientForAdmin(&d.conf.AuthOptions)
- if err != nil {
- log.Errorf("failed to get new client, %v", err)
- return nil, err
- }
- err = adminClient.login()
- if err != nil {
- log.Errorf("failed to login, %v", err)
- return nil, err
- }
- defer adminClient.Destroy()
-
- // Start SnapOPC+ session (create shapshot)
- err = adminClient.CreateSnapshot(lunID, vol.Id)
- if err != nil {
- log.Errorf("failed to create snapopc+ session, error: %v", err)
- return nil, err
- }
-
- // get session id
- snapshotList, err := adminClient.ListSnapshot()
- snapshot := SnapShot{}
- for _, v := range snapshotList {
- if v.SrcNo == lunID && v.DestNo == vol.Id {
- snapshot = v
- break
- }
- }
- log.Info("create snapshot success, snapshot id =", opt.GetId())
- return &model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Description: opt.GetDescription(),
- VolumeId: opt.GetVolumeId(),
- Size: 0,
- Metadata: map[string]string{
- KSnapId: snapshot.Sid,
- KSnapLunId: vol.Id,
- },
- }, nil
-}
-
-// PullSnapshot :
-func (d *Driver) PullSnapshot(snapIdentifier string) (*model.VolumeSnapshotSpec, error) {
- return nil, &model.NotImplementError{"method PullSnapshot is not implement."}
-}
-
-// DeleteSnapshot :
-func (d *Driver) DeleteSnapshot(opt *pb.DeleteVolumeSnapshotOpts) error {
- sid := opt.GetMetadata()[KSnapId]
- volID := opt.GetMetadata()[KSnapLunId]
-
- // get Client for admin role
- adminClient, err := NewClientForAdmin(&d.conf.AuthOptions)
- if err != nil {
- log.Errorf("failed to create client, error: %v", err)
- return err
- }
- err = adminClient.login()
- if err != nil {
- log.Errorf("failed to login, %v", err)
- return err
- }
- defer adminClient.Destroy()
-
- // delete snapshot
- err = adminClient.DeleteSnapshot(sid)
- if err != nil {
- log.Errorf("failed to delete snapshot, snapshot id = %s , error: %v", opt.GetId(), err)
- return err
- }
-
- // delete snapshot volume
- err = d.client.DeleteVolume(volID)
- if err != nil {
- log.Errorf("failed to delete snapshot volume, volume id = %s , error: %v", volID, err)
- return err
- }
- log.Info("delete snapshot success, snapshot id =", opt.GetId())
- return nil
-}
-
-func (d *Driver) InitializeSnapshotConnection(opt *pb.CreateSnapshotAttachmentOpts) (*model.ConnectionInfo, error) {
- return nil, &model.NotImplementError{S: "method InitializeSnapshotConnection has not been implemented yet."}
-}
-
-func (d *Driver) TerminateSnapshotConnection(opt *pb.DeleteSnapshotAttachmentOpts) error {
- return &model.NotImplementError{S: "method TerminateSnapshotConnection has not been implemented yet."}
-}
-
-func (d *Driver) CreateVolumeGroup(opt *pb.CreateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method CreateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) UpdateVolumeGroup(opt *pb.UpdateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method UpdateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) DeleteVolumeGroup(opt *pb.DeleteVolumeGroupOpts) error {
- return &model.NotImplementError{"method DeleteVolumeGroup has not been implemented yet"}
-}
diff --git a/contrib/drivers/fujitsu/eternus/eternus_test.go b/contrib/drivers/fujitsu/eternus/eternus_test.go
deleted file mode 100644
index 2e4f84139..000000000
--- a/contrib/drivers/fujitsu/eternus/eternus_test.go
+++ /dev/null
@@ -1,1322 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package eternus
-
-import (
- "fmt"
- "os"
- "reflect"
- "strconv"
- "strings"
- "testing"
-
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- uuid "github.com/satori/go.uuid"
-
- mock "github.com/stretchr/testify/mock"
-)
-
-func checkArg(actual string, expected string) bool {
- actualArr := deleteEmptyStr(strings.Split(actual, " "))
- expectedArr := deleteEmptyStr(strings.Split(expected, " "))
- // same number of element
- if len(actualArr) != len(expectedArr) {
- return false
- }
-
- // same command and args
- for i := 0; i < len(actualArr); i = i + 2 {
- match := false
- for k := 0; k < len(expectedArr); k = k + 2 {
- if actualArr[i] == expectedArr[k] &&
- actualArr[i+1] == expectedArr[k+1] {
- match = true
- break
- }
- }
- if match {
- continue
- }
- return false
- }
-
- return true
-}
-func deleteEmptyStr(arr []string) []string {
- ret := []string{}
- for _, v := range arr {
- if v != "" && v != "\n" {
- ret = append(ret, v)
- }
- }
- return ret
-}
-
-func TestListPools(t *testing.T) {
-
- execCmd := "show thin-pro-pools\n"
- outStr := "\r\nCLI> show thin-pro-pools\r\n"
- outStr += "00\r\n"
- outStr += "00000000\r\n"
- outStr += "0004\r\n"
- outStr += "0011\tosdstest\t01\t00\t04\t0010\t00000000666FC000\t0000000000000000\t01\t5A\t4B\t00\t00\t00\t0000000000200000\t00000001\tFF\t01\r\n"
- outStr += "0012\tpoolname\t01\t00\t04\t0010\t00000000666FC000\t0000000000000000\t01\t5A\t4B\t00\t00\t00\t0000000000200000\t00000001\tFF\t01\r\n"
- outStr += "0013\tpoolname2\t01\t00\t04\t0010\t0000000019000000\t0000000018000000\t01\t5A\t4B\t00\t00\t00\t0000000000200000\t00000001\tFF\t01\r\n"
- outStr += "0014\tpoolname3\t01\t00\t04\t0010\t0000000000000000\t0000000000000000\t01\t5A\t4B\t00\t00\t00\t0000000000200000\t00000001\tFF\t01\r\n"
- outStr += "CLI> "
- client := createIOMock(execCmd, outStr)
-
- d := &Driver{
- conf: &EternusConfig{
- AuthOptions: AuthOptions{
- Endpoint: "1.2.3.4",
- },
- Pool: map[string]PoolProperties{
- "poolname": PoolProperties{
- Extras: model.StoragePoolExtraSpec{
- DataStorage: model.DataStorageLoS{
- ProvisioningPolicy: "Thin",
- },
- },
- StorageType: "block",
- AvailabilityZone: "az-test",
- },
- "poolname2": PoolProperties{
- Extras: model.StoragePoolExtraSpec{
- DataStorage: model.DataStorageLoS{
- ProvisioningPolicy: "Thin",
- },
- },
- StorageType: "block",
- },
- },
- },
- client: client,
- }
-
- ret, err := d.ListPools()
- if err != nil {
- t.Error("Test ListPools failed")
- }
- if len(ret) != 2 {
- t.Error("Test ListPools failed")
- }
-
- host, _ := os.Hostname()
- name := fmt.Sprintf("%s:%s:%s", host, d.conf.Endpoint, "18")
- id := uuid.NewV5(uuid.NamespaceOID, name).String()
- if ret[0].BaseModel.Id != id || ret[0].Name != "poolname" ||
- ret[0].TotalCapacity != 819 || ret[0].FreeCapacity != 819 ||
- ret[0].StorageType != "block" || ret[0].AvailabilityZone != "az-test" ||
- ret[0].Extras.DataStorage.ProvisioningPolicy != "Thin" {
- t.Error("Test ListPools failed")
- }
- name = fmt.Sprintf("%s:%s:%s", host, d.conf.Endpoint, "19")
- id = uuid.NewV5(uuid.NamespaceOID, name).String()
- if ret[1].BaseModel.Id != id || ret[1].Name != "poolname2" ||
- ret[1].TotalCapacity != 200 || ret[1].FreeCapacity != 8 ||
- ret[1].StorageType != "block" || ret[1].AvailabilityZone != "default" ||
- ret[1].Extras.DataStorage.ProvisioningPolicy != "Thin" {
- t.Error("Test ListPools failed")
- }
-}
-
-func TestCreateVolume(t *testing.T) {
- id := "volumeid"
- size := "1"
- sizeInt, _ := strconv.ParseInt(size, 10, 64)
- hashname := GetFnvHash(id)
- poolname := "poolname"
-
- opt := &pb.CreateVolumeOpts{
- Id: id,
- Name: "volumename",
- Size: sizeInt,
- Description: "test description",
- AvailabilityZone: "default",
- ProfileId: "profileid",
- PoolId: "poolid",
- PoolName: poolname,
- Metadata: map[string]string{},
- DriverName: "drivername",
- Context: "",
- }
- execCmd := "create volume -name " + hashname
- execCmd += " -size " + size + "gb"
- execCmd += " -pool-name " + poolname
- execCmd += " -type tpv -allocation thin \n"
-
- outStr := "\r\nCLI> " + execCmd + " \r\n"
- outStr += "00\r\n"
- outStr += "01\r\n"
- outStr += "11\r\n"
- outStr += "CLI> "
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmd)
- }),
- ).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(2, nil).Once()
- buff := make([]byte, 65535)
- out := []byte(outStr)
- mockReadCloser := new(MockReadCloser)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- d := &Driver{
- conf: &EternusConfig{
- Pool: map[string]PoolProperties{
- "poolname": PoolProperties{
- Extras: model.StoragePoolExtraSpec{
- DataStorage: model.DataStorageLoS{
- ProvisioningPolicy: "Thin",
- },
- },
- },
- },
- },
- client: client,
- }
-
- ret, err := d.CreateVolume(opt)
- if err != nil {
- t.Error("Test CreateVolume failed")
- }
- if ret.BaseModel.Id != id || ret.Name != opt.Name ||
- ret.Size != sizeInt || ret.Description != opt.Description ||
- ret.AvailabilityZone != opt.AvailabilityZone ||
- ret.Metadata[KLunId] != "17" {
- t.Error("Test CreateVolume failed")
- }
-}
-
-func TestDeleteVolume(t *testing.T) {
- opt := &pb.DeleteVolumeOpts{
- Id: "id",
- Metadata: map[string]string{
- KLunId: "21",
- },
- DriverName: "drivername",
- Context: "",
- }
-
- execCmd := "delete volume -volume-number 21 \n"
- outStr := "\r\nCLI> " + execCmd + " \r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- client := createIOMock(execCmd, outStr)
-
- d := &Driver{
- conf: &EternusConfig{
- CeSupport: false,
- },
- client: client,
- }
-
- err := d.DeleteVolume(opt)
- if err != nil {
- t.Error("Test DeleteVolume failed")
- }
-}
-
-func TestExtendVolume(t *testing.T) {
- id := "volumeid"
- lunid := "21"
- size := "2"
- sizeInt, _ := strconv.ParseInt(size, 10, 64)
- poolname := "poolname"
-
- opt := &pb.ExtendVolumeOpts{
- Id: id,
- Name: "volumename",
- Size: sizeInt,
- Description: "test description",
- AvailabilityZone: "default",
- ProfileId: "profileid",
- PoolId: "poolid",
- PoolName: poolname,
- Metadata: map[string]string{
- KLunId: lunid,
- },
- DriverName: "drivername",
- Context: "",
- }
- execCmd := "expand volume -volume-number " + lunid
- execCmd += " -size " + size + "gb \n"
- outStr := "\r\nCLI> " + execCmd + " \r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmd)
- }),
- ).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(2, nil).Once()
- buff := make([]byte, 65535)
- out := []byte(outStr)
- mockReadCloser := new(MockReadCloser)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- d := &Driver{
- conf: &EternusConfig{
- CeSupport: false,
- },
- client: client,
- }
-
- ret, err := d.ExtendVolume(opt)
- if err != nil {
- t.Error("Test ExtendVolume failed")
- }
- if ret.BaseModel.Id != id || ret.Name != opt.Name ||
- ret.Size != opt.Size || ret.Description != opt.Description ||
- ret.AvailabilityZone != opt.AvailabilityZone {
- t.Error("Test ExtendVolume failed")
- }
-}
-
-func TestInitializeConnection_IscsiNoPort(t *testing.T) {
- opt := &pb.CreateVolumeAttachmentOpts{
- Id: "id",
- VolumeId: "volumeid",
- DoLocalAttach: false,
- MultiPath: false,
- HostInfo: &pb.HostInfo{
- Platform: "linux",
- OsType: "ubuntu",
- Host: "hostname",
- Ip: "1.1.1.1",
- Initiators: []*pb.Initiator{
- &pb.Initiator{
- Protocol: "iscsi",
- PortName: "iqn.testtest",
- },
- },
- },
- Metadata: map[string]string{
- KLunId: "1",
- },
- DriverName: "drivername",
- Context: "",
- AccessProtocol: "iscsi",
- }
-
- execCmd := "show iscsi-parameters\n"
- outStr := "CLI> show iscsi-parameters\r\n"
- outStr += "00\r\n"
- outStr += "04\r\n"
- outStr += "50\t00\t01\t00\tiqn.eternus-dx1\t\tFF\tDefault\t01\t00\t192.168.1.1\t255.255.255.0\t0.0.0.0\t000000000000\t0CBC\t02\t00000000\t0000\t0.0.0.0\t0C85\t\t00\t00\t00\t0001\t00\tFFFF\t0514\t0000\t\t::\t::\t::\tFF\tFF\t80000000\tFF\tFF\r\n"
- outStr += "50\t01\t00\t01\tiqn.eternus-dx2\t\tFF\tDefault\t01\t00\t192.166.1.2\t255.255.255.0\t0.0.0.0\t000000000000\t0CBC\t02\t00000000\t0000\t0.0.0.0\t0C85\t\t00\t00\t00\t0000\t00\tFFFF\t0514\t0000\t\t::\t::\t::\t00\t00\t80000000\t00\tFF\r\n"
- outStr += "51\t00\t04\t01\tiqn.eternus-dx3\t\t00\tDefault\t01\t00\t192.168.1.2\t255.255.255.0\t0.0.0.0\t000000000000\t0CBC\t02\t00000000\t0000\t0.0.0.0\t0C85\t\t00\t00\t00\t0000\t00\tFFFF\t0514\t0000\t\t::\t::\t::\tFF\tFF\t80000000\tFF\tFF\r\n"
- outStr += "51\t01\t01\t01\tiqn.eternus-dx4\t\tFF\tDefault\t01\t00\t192.166.1.4\t255.255.255.0\t0.0.0.0\t000000000000\t0CBC\t02\t00000000\t0000\t0.0.0.0\t0C85\t\t00\t00\t00\t0000\t00\tFFFF\t0514\t0000\t\t::\t::\t::\t00\t00\t80000000\t00\tFF\r\n"
- outStr += "CLI> "
- client := createIOMock(execCmd, outStr)
-
- d := &Driver{
- conf: &EternusConfig{
- CeSupport: false,
- },
- client: client,
- }
-
- _, err := d.InitializeConnection(opt)
- if err == nil {
- t.Error("Test NewClient failed")
- }
-}
-
-func TestInitializeConnection_Iscsi(t *testing.T) {
- initiator := "iqn.testtest"
- hostname := "hostname"
- ipAddr := "1.1.1.1"
- hashhostname := GetFnvHash(initiator + ipAddr)
- opt := &pb.CreateVolumeAttachmentOpts{
- Id: "id",
- VolumeId: "volumeid",
- DoLocalAttach: false,
- MultiPath: false,
- HostInfo: &pb.HostInfo{
- Platform: "linux",
- OsType: "ubuntu",
- Host: hostname,
- Ip: ipAddr,
- Initiators: []*pb.Initiator{
- &pb.Initiator{
- Protocol: "iscsi",
- PortName: initiator,
- },
- },
- },
- Metadata: map[string]string{
- KLunId: "21",
- },
- DriverName: "drivername",
- Context: "",
- AccessProtocol: ISCSIProtocol,
- }
-
- // Get iscsi port
- execCmd := "show iscsi-parameters\n"
- outStr := "CLI> show iscsi-parameters\r\n"
- outStr += "00\r\n"
- outStr += "01\r\n"
- outStr += "50\t00\t00\t00\tiqn.eternus-dx1\t\tFF\tDefault\t01\t00\t192.168.1.1\t255.255.255.0\t0.0.0.0\t000000000000\t0CBC\t02\t00000000\t0000\t0.0.0.0\t0C85\t\t00\t00\t00\t0001\t00\tFFFF\t0514\t0000\t\t::\t::\t::\tFF\tFF\t80000000\tFF\tFF\r\n"
- outStr += "CLI> "
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(2, nil).Once()
- mockReadCloser := new(MockReadCloser)
- buff := make([]byte, 65535)
- out := []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Get iscsi host
- execCmd = "show host-iscsi-names\n"
- outStr = "\r\nCLI> show host-iscsi-names\r\n"
- outStr += "00\r\n"
- outStr += "0003\r\n"
- outStr += "0000\tHOST_NAME#0\t00\tDefault\t7F000001\tiqn.testtesttest\t00\r\n"
- outStr += "0001\tHOST_NAME#1\t00\tDefault\t7F000001\tiqn.testtesttesttest\t00\r\n"
- outStr += "0002\ttest_0\t00\tDefault\t02020202\tiqn.test\t00\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(3, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(4, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Create iscsi host
- execCmdCreateHost := "create host-iscsi-name"
- execCmdCreateHost += " -name " + hashhostname
- execCmdCreateHost += " -ip " + ipAddr + " -ip-version ipv4"
- execCmdCreateHost += " -iscsi-name " + initiator + " \n"
- outStr = "\r\nCLI> create host-iscsi-name\r\n"
- outStr += "00\r\n"
- outStr += "11\r\n"
- outStr += "01\r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmdCreateHost)
- }),
- ).Return(5, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(6, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Get Lun group
- execCmd = "show lun-groups\n"
- outStr = "\r\nCLI> show lun-groups\r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "0000\ttest\tFFFF\tFFFF\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(7, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(8, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Create Lun group
- execCmdCreateLunGrp := "create lun-group"
- execCmdCreateLunGrp += " -name " + hashhostname
- execCmdCreateLunGrp += " -volume-number 21 -lun 0 \n"
- outStr = "\r\nCLI> create lun-group\r\n"
- outStr += "00\r\n"
- outStr += "12\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmdCreateLunGrp)
- }),
- ).Return(9, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(10, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Add host affinity
- execCmdSetHostAffinity := "set host-affinity \n"
- execCmdSetHostAffinity += " -port 010"
- execCmdSetHostAffinity += " -lg-number 18"
- execCmdSetHostAffinity += " -host-number 17 \n"
- outStr = "\r\nCLI> set host-affinity\r\n"
- outStr += "00\r\n"
- outStr += "01\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmdSetHostAffinity)
- }),
- ).Return(11, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(12, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Get host lun
- execCmd = "show lun-group -lg-number 18 \n"
- outStr = "\r\nCLI> show lun-group -lg-number 18\r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "0000\ttest\tFFFF\tFFFF\r\n"
- outStr += "0003\r\n"
- outStr += "0000\t0014\tvolname1\tA000\t20\t0000000000000000\t00000000000000000000000000000000\r\n"
- outStr += "0015\t0015\tvolname2\tA000\t20\t0000000000000000\t00000000000000000000000000000000\r\n"
- outStr += "0002\t0016\tvolname3\tA000\t20\t0000000000000000\t00000000000000000000000000000000\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(13, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(14, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- d := &Driver{
- conf: &EternusConfig{
- CeSupport: false,
- },
- client: client,
- }
-
- ret, err := d.InitializeConnection(opt)
- if err != nil {
- t.Error("Test InitializeConnection failed")
- }
- connData := ret.ConnectionData
- if !reflect.DeepEqual(connData["targetIQN"], []string{"iqn.eternus-dx1"}) ||
- !reflect.DeepEqual(connData["targetPortal"], []string{"192.168.1.1:3260"}) ||
- connData["targetLun"] != 21 {
- t.Error("Test InitializeConnection failed")
- }
-}
-
-func TestInitializeConnection_FC(t *testing.T) {
- initiator := "AAAAAAAAAAAAAAAA"
- hostname := "hostname"
- ipAddr := "1.1.1.1"
- hashhostname := GetFnvHash(initiator)
- opt := &pb.CreateVolumeAttachmentOpts{
- Id: "id",
- VolumeId: "volumeid",
- DoLocalAttach: false,
- MultiPath: false,
- HostInfo: &pb.HostInfo{
- Platform: "linux",
- OsType: "ubuntu",
- Host: hostname,
- Ip: ipAddr,
- Initiators: []*pb.Initiator{
- &pb.Initiator{
- Protocol: FCProtocol,
- PortName: initiator,
- },
- },
- },
- Metadata: map[string]string{
- KLunId: "21",
- },
- DriverName: "drivername",
- Context: "",
- AccessProtocol: FCProtocol,
- }
-
- // Get iscsi port
- execCmd := "show fc-parameters\n"
- outStr := "CLI> show fc-parameters\r\n"
- outStr += "00\r\n"
- outStr += "01\r\n"
- outStr += "40\t00\t04\t01\tFF\tFF\t00\t0800\t00\tFF\t\t01\t00\t00\t00\t00\tFF\t0000000000000001\t0000000000000000\tFF\r\n"
- outStr += "CLI> "
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(2, nil).Once()
- mockReadCloser := new(MockReadCloser)
- buff := make([]byte, 65535)
- out := []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Get iscsi host
- execCmd = "show host-wwn-names\n"
- outStr = "\r\nCLI> show host-wwn-names\r\n"
- outStr += "00\r\n"
- outStr += "0003\r\n"
- outStr += "0000\tHOST_NAME#0\t1234567890123456\t0000\tDefault\r\n"
- outStr += "0001\tHOST_NAME#1\t1234567890123457\t0000\tDefault\r\n"
- outStr += "0002\tHOST_NAME#2\t1234567890123458\t0000\tDefault\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(3, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(4, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Create iscsi host
-
- execCmdCreateHost := "create host-wwn-name"
- execCmdCreateHost += " -name " + hashhostname
- execCmdCreateHost += " -wwn " + initiator + " \n"
- outStr = "\r\nCLI> create host-wwn-name\r\n"
- outStr += "00\r\n"
- outStr += "11\r\n"
- outStr += "01\r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmdCreateHost)
- }),
- ).Return(5, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(6, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Get Lun group
- execCmd = "show lun-groups\n"
- outStr = "\r\nCLI> show lun-groups\r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "0000\ttest\tFFFF\tFFFF\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(7, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(8, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Create Lun group
- execCmdCreateLunGrp := "create lun-group"
- execCmdCreateLunGrp += " -name " + hashhostname
- execCmdCreateLunGrp += " -volume-number 21 -lun 0 \n"
- outStr = "\r\nCLI> create lun-group\r\n"
- outStr += "00\r\n"
- outStr += "12\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmdCreateLunGrp)
- }),
- ).Return(9, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(10, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Add host affinity
- execCmdSetHostAffinity := "set host-affinity \n"
- execCmdSetHostAffinity += " -port 000"
- execCmdSetHostAffinity += " -lg-number 18"
- execCmdSetHostAffinity += " -host-number 17 \n"
- outStr = "\r\nCLI> set host-affinity\r\n"
- outStr += "00\r\n"
- outStr += "01\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmdSetHostAffinity)
- }),
- ).Return(11, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(12, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Get host lun
- execCmd = "show lun-group -lg-number 18 \n"
- outStr = "\r\nCLI> show lun-group -lg-number 18\r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "0000\ttest\tFFFF\tFFFF\r\n"
- outStr += "0003\r\n"
- outStr += "0000\t0014\tvolname1\tA000\t20\t0000000000000000\t00000000000000000000000000000000\r\n"
- outStr += "0015\t0015\tvolname2\tA000\t20\t0000000000000000\t00000000000000000000000000000000\r\n"
- outStr += "0002\t0016\tvolname3\tA000\t20\t0000000000000000\t00000000000000000000000000000000\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(13, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(14, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- d := &Driver{
- conf: &EternusConfig{
- CeSupport: false,
- },
- client: client,
- }
-
- ret, err := d.InitializeConnection(opt)
- if err != nil {
- t.Error("Test InitializeConnection failed")
- }
- connData := ret.ConnectionData
- if !reflect.DeepEqual(connData["targetWWNs"], []string{"0000000000000001"}) ||
- connData["hostName"] != hostname ||
- connData["targetLun"] != 21 {
-
- t.Error("Test InitializeConnection failed")
- }
-}
-
-func TestInitializeConnection_FCNoInitiator(t *testing.T) {
- initiator := ""
- hostname := "hostname"
- ipAddr := "1.1.1.1"
- opt := &pb.CreateVolumeAttachmentOpts{
- Id: "id",
- VolumeId: "volumeid",
- DoLocalAttach: false,
- MultiPath: false,
- HostInfo: &pb.HostInfo{
- Platform: "linux",
- OsType: "ubuntu",
- Host: hostname,
- Ip: ipAddr,
- Initiators: []*pb.Initiator{
- &pb.Initiator{
- Protocol: FCProtocol,
- PortName: initiator,
- },
- },
- },
- Metadata: map[string]string{
- KLunId: "21",
- },
- DriverName: "drivername",
- Context: "",
- AccessProtocol: FCProtocol,
- }
-
- // Get iscsi port
- execCmd := "show fc-parameters\n"
- outStr := "CLI> show fc-parameters\r\n"
- outStr += "00\r\n"
- outStr += "01\r\n"
- outStr += "40\t00\t04\t01\tFF\tFF\t00\t0800\t01\tFF\t\t01\t00\t00\t00\t00\tFF\t0000000000000001\t0000000000000000\tFF\r\n"
- outStr += "CLI> "
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(2, nil).Once()
- mockReadCloser := new(MockReadCloser)
- buff := make([]byte, 65535)
- out := []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Show mapping
- execCmd = "show mapping -port 000 \n"
- outStr = "\r\nCLI> show mapping -port 000\r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "11005100\t00\r\n"
- outStr += "0001\r\n"
- outStr += "0000\t0001\tosds-643e8232-1b\tA000\t20\t0000000000200000\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(2, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Add mapping
- execCmdSetMapping := "set mapping"
- execCmdSetMapping += " -port 000"
- execCmdSetMapping += " -volume-number 21"
- execCmdSetMapping += " -lun 1 \n"
- outStr = "\r\nCLI> set mapping\r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmdSetMapping)
- }),
- ).Return(11, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(12, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- d := &Driver{
- conf: &EternusConfig{
- CeSupport: false,
- },
- client: client,
- }
-
- ret, err := d.InitializeConnection(opt)
- if err != nil {
- t.Error("Test InitializeConnection failed")
- }
- connData := ret.ConnectionData
- if !reflect.DeepEqual(connData["targetWWNs"], []string{"0000000000000001"}) ||
- connData["hostName"] != hostname ||
- connData["targetLun"] != 1 {
- t.Error("Test InitializeConnection failed")
- }
-}
-
-func TestTerminateConnection_Iscsi(t *testing.T) {
- initiator := "iqn.testtest"
- hostname := "hostname"
- ipAddr := "1.1.1.1"
- hashhostname := GetFnvHash(initiator + ipAddr)
- opt := &pb.DeleteVolumeAttachmentOpts{
- Id: "id",
- VolumeId: "volumeid",
- HostInfo: &pb.HostInfo{
- Platform: "linux",
- OsType: "ubuntu",
- Host: hostname,
- Ip: ipAddr,
- Initiators: []*pb.Initiator{
- &pb.Initiator{
- Protocol: "iscsi",
- PortName: initiator,
- },
- },
- },
- Metadata: map[string]string{
- KLunId: "21",
- },
- DriverName: "drivername",
- Context: "",
- AccessProtocol: ISCSIProtocol,
- }
-
- // Get iscsi port
- execCmd := "show iscsi-parameters\n"
- outStr := "CLI> show iscsi-parameters\r\n"
- outStr += "00\r\n"
- outStr += "01\r\n"
- outStr += "50\t00\t00\t00\tiqn.eternus-dx1\t\tFF\tDefault\t01\t00\t192.168.1.1\t255.255.255.0\t0.0.0.0\t000000000000\t0CBC\t02\t00000000\t0000\t0.0.0.0\t0C85\t\t00\t00\t00\t0001\t00\tFFFF\t0514\t0000\t\t::\t::\t::\tFF\tFF\t80000000\tFF\tFF\r\n"
- outStr += "CLI> "
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(2, nil).Once()
- mockReadCloser := new(MockReadCloser)
- buff := make([]byte, 65535)
- out := []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Get host lun
- execCmd = "show lun-group -lg-name " + hashhostname + " \n"
- outStr = "\r\nCLI> show lun-group -lg-name " + hashhostname + "\r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "0000\ttest\tFFFF\tFFFF\r\n"
- outStr += "0003\r\n"
- outStr += "0000\t0014\tvolname1\tA000\t20\t0000000000000000\t00000000000000000000000000000000\r\n"
- outStr += "0015\t0015\tvolname2\tA000\t20\t0000000000000000\t00000000000000000000000000000000\r\n"
- outStr += "0002\t0016\tvolname3\tA000\t20\t0000000000000000\t00000000000000000000000000000000\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(12, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Remove volume from lun group
- execCmd = "delete lun-group -lg-name " + hashhostname + " -lun 21 \n"
- outStr = "\r\nCLI> " + execCmd + "\r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmd)
- }),
- ).Return(3, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(4, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- d := &Driver{
- conf: &EternusConfig{
- CeSupport: false,
- },
- client: client,
- }
-
- err := d.TerminateConnection(opt)
- if err != nil {
- t.Error("Test TerminateConnection failed")
- }
-}
-
-func TestTerminateConnection_IscsiDLunGroup(t *testing.T) {
- initiator := "iqn.testtest"
- hostname := "hostname"
- ipAddr := "1.1.1.1"
- hashhostname := GetFnvHash(initiator + ipAddr)
- opt := &pb.DeleteVolumeAttachmentOpts{
- Id: "id",
- VolumeId: "volumeid",
- HostInfo: &pb.HostInfo{
- Platform: "linux",
- OsType: "ubuntu",
- Host: hostname,
- Ip: ipAddr,
- Initiators: []*pb.Initiator{
- &pb.Initiator{
- Protocol: "iscsi",
- PortName: initiator,
- },
- },
- },
- Metadata: map[string]string{
- KLunId: "21",
- },
- DriverName: "drivername",
- Context: "",
- AccessProtocol: ISCSIProtocol,
- }
-
- // Get iscsi port
- execCmd := "show iscsi-parameters\n"
- outStr := "CLI> show iscsi-parameters\r\n"
- outStr += "00\r\n"
- outStr += "01\r\n"
- outStr += "50\t00\t00\t00\tiqn.eternus-dx1\t\tFF\tDefault\t01\t00\t192.168.1.1\t255.255.255.0\t0.0.0.0\t000000000000\t0CBC\t02\t00000000\t0000\t0.0.0.0\t0C85\t\t00\t00\t00\t0001\t00\tFFFF\t0514\t0000\t\t::\t::\t::\tFF\tFF\t80000000\tFF\tFF\r\n"
- outStr += "CLI> "
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(2, nil).Once()
- mockReadCloser := new(MockReadCloser)
- buff := make([]byte, 65535)
- out := []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Get host lun
- execCmd = "show lun-group -lg-name " + hashhostname + " \n"
- outStr = "\r\nCLI> show lun-group -lg-name " + hashhostname + "\r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "0000\ttest\tFFFF\tFFFF\r\n"
- outStr += "0001\r\n"
- outStr += "0015\t0015\tvolname2\tA000\t20\t0000000000000000\t00000000000000000000000000000000\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(12, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Remove volume from lun group
- execCmdReleaseHostAffinity := "release host-affinity -port 010"
- execCmdReleaseHostAffinity += " -host-name " + hashhostname + " -mode all \n"
- outStr = "\r\nCLI> " + execCmd + "\r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmdReleaseHostAffinity)
- }),
- ).Return(3, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(4, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Delete lun group
- execCmd = "delete lun-group -lg-name " + hashhostname + " \n"
- outStr = "\r\nCLI> " + execCmd + "\r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(6, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Delete host
- execCmd = "delete host-iscsi-name -host-name " + hashhostname + " \n"
- outStr = "\r\nCLI> " + execCmd + "\r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(6, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- d := &Driver{
- conf: &EternusConfig{
- CeSupport: false,
- },
- client: client,
- }
-
- err := d.TerminateConnection(opt)
- if err != nil {
- t.Error("Test TerminateConnection failed")
- }
-}
-
-func TestTerminateConnection_FcDLunGroup(t *testing.T) {
- initiator := "AAAAAAAAAAAAAAAA"
- hostname := "hostname"
- ipAddr := "1.1.1.1"
- hashhostname := GetFnvHash(initiator)
- opt := &pb.DeleteVolumeAttachmentOpts{
- Id: "id",
- VolumeId: "volumeid",
- HostInfo: &pb.HostInfo{
- Platform: "linux",
- OsType: "ubuntu",
- Host: hostname,
- Ip: ipAddr,
- Initiators: []*pb.Initiator{
- &pb.Initiator{
- Protocol: FCProtocol,
- PortName: initiator,
- },
- },
- },
- Metadata: map[string]string{
- KLunId: "21",
- },
- DriverName: "drivername",
- Context: "",
- AccessProtocol: FCProtocol,
- }
-
- // Get iscsi port
- execCmd := "show fc-parameters\n"
- outStr := "CLI> show fc-parameters\r\n"
- outStr += "00\r\n"
- outStr += "01\r\n"
- outStr += "40\t00\t04\t01\tFF\tFF\t00\t0800\t00\tFF\t\t01\t00\t00\t00\t00\tFF\t0000000000000001\t0000000000000000\tFF\r\n"
- outStr += "CLI> "
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(2, nil).Once()
- mockReadCloser := new(MockReadCloser)
- buff := make([]byte, 65535)
- out := []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Get host lun
- execCmd = "show lun-group -lg-name " + hashhostname + " \n"
- outStr = "\r\nCLI> show lun-group -lg-name " + hashhostname + "\r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "0000\ttest\tFFFF\tFFFF\r\n"
- outStr += "0001\r\n"
- outStr += "0015\t0015\tvolname2\tA000\t20\t0000000000000000\t00000000000000000000000000000000\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(12, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Remove volume from lun group
- execCmdReleaseHostAffinity := "release host-affinity -port 000"
- execCmdReleaseHostAffinity += " -host-name " + hashhostname + " -mode all \n"
- outStr = "\r\nCLI> " + execCmd + "\r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmdReleaseHostAffinity)
- }),
- ).Return(3, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(4, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Delete lun group
- execCmd = "delete lun-group -lg-name " + hashhostname + " \n"
- outStr = "\r\nCLI> " + execCmd + "\r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(6, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Delete host
- execCmd = "delete host-wwn-name -host-name " + hashhostname + " \n"
- outStr = "\r\nCLI> " + execCmd + "\r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(6, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- d := &Driver{
- conf: &EternusConfig{
- CeSupport: false,
- },
- client: client,
- }
-
- err := d.TerminateConnection(opt)
- if err != nil {
- t.Error("Test TerminateConnection failed")
- }
-}
-
-func TestAddMapping(t *testing.T) {
- // Show mapping
- execCmd := "show mapping -port 000 \n"
- outStr := "\r\nCLI> show mapping -port 000\r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "11005100\t00\r\n"
- outStr += "0003\r\n"
- outStr += "0000\t0001\tvol1\tA000\t20\t0000000000200000\r\n"
- outStr += "0001\t0002\tvol2\tA000\t20\t0000000000200000\r\n"
- outStr += "0003\t0002\tvol2\tA000\t20\t0000000000200000\r\n"
- outStr += "CLI> "
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(2, nil).Once()
- mockReadCloser := new(MockReadCloser)
- buff := make([]byte, 65535)
- out := []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Add mapping
- execCmdSetMapping := "set mapping"
- execCmdSetMapping += " -port 000"
- execCmdSetMapping += " -volume-number 21"
- execCmdSetMapping += " -lun 2 \n"
- outStr = "\r\nCLI> set mapping\r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmdSetMapping)
- }),
- ).Return(11, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(12, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- d := &Driver{
- conf: &EternusConfig{
- CeSupport: false,
- },
- client: client,
- }
-
- ret, err := d.addMapping("000", "21")
- if err != nil {
- t.Error("Test addMapping failed")
- }
- if ret != "2" {
- t.Error("Test addMapping failed")
- }
-}
-
-func TestAddMapping_Max(t *testing.T) {
- // Show mapping
- execCmd := "show mapping -port 000 \n"
- outStr := "\r\nCLI> show mapping -port 000\r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "11005100\t00\r\n"
- outStr += "0400\r\n"
- for i := 0; i < 1024; i++ {
- outStr += strconv.Itoa(i) + "\t0001\tvol1\tA000\t20\t0000000000200000\r\n"
- }
- outStr += "CLI> "
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(2, nil).Once()
- mockReadCloser := new(MockReadCloser)
- buff := make([]byte, 65535)
- out := []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- d := &Driver{
- conf: &EternusConfig{
- CeSupport: false,
- },
- client: client,
- }
-
- _, err := d.addMapping("000", "21")
- if err == nil {
- t.Error("Test addMapping failed")
- }
-}
-
-func TestDeleteMapping(t *testing.T) {
- // Show mapping
- execCmd := "show mapping -port 000 \n"
- outStr := "\r\nCLI> show mapping -port 000\r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "11005100\t00\r\n"
- outStr += "0003\r\n"
- outStr += "0000\t0001\tvol1\tA000\t20\t0000000000200000\r\n"
- outStr += "0001\t0002\tvol2\tA000\t20\t0000000000200000\r\n"
- outStr += "0003\t0011\tvol2\tA000\t20\t0000000000200000\r\n"
- outStr += "CLI> "
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(2, nil).Once()
- mockReadCloser := new(MockReadCloser)
- buff := make([]byte, 65535)
- out := []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- // Delete mapping
- execCmdSetMapping := "release mapping"
- execCmdSetMapping += " -port 000"
- execCmdSetMapping += " -lun 3 \n"
- outStr = "\r\nCLI> release mapping\r\n"
- outStr += "00\r\n"
- outStr += "CLI> "
- mockWriteCloser.On("Write",
- mock.MatchedBy(
- func(cmd []byte) bool {
- return checkArg(string(cmd), execCmdSetMapping)
- }),
- ).Return(11, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(12, nil).Once()
- buff = make([]byte, 65535)
- out = []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- d := &Driver{
- conf: &EternusConfig{
- CeSupport: false,
- },
- client: client,
- }
-
- err := d.deleteMapping("000", "17")
- if err != nil {
- t.Error("Test deleteMapping failed")
- }
-}
-
-func TestDeleteMapping_Deleted(t *testing.T) {
- // Show mapping
- execCmd := "show mapping -port 000 \n"
- outStr := "\r\nCLI> show mapping -port 000\r\n"
- outStr += "00\r\n"
- outStr += "0001\r\n"
- outStr += "11005100\t00\r\n"
- outStr += "0003\r\n"
- outStr += "0000\t0001\tvol1\tA000\t20\t0000000000200000\r\n"
- outStr += "0001\t0002\tvol2\tA000\t20\t0000000000200000\r\n"
- outStr += "0003\t0010\tvol2\tA000\t20\t0000000000200000\r\n"
- outStr += "CLI> "
- mockWriteCloser := new(MockWriteCloser)
- mockWriteCloser.On("Write", []byte(execCmd)).Return(1, nil).Once()
- mockWriteCloser.On("Write", []byte("\n")).Return(2, nil).Once()
- mockReadCloser := new(MockReadCloser)
- buff := make([]byte, 65535)
- out := []byte(outStr)
- mockReadCloser.On("Read", buff).Return(len(out), nil, out).Once()
-
- client := &EternusClient{
- stdin: mockWriteCloser,
- stdout: mockReadCloser,
- cliConfPath: "./config/cli_response.yml",
- }
- d := &Driver{
- conf: &EternusConfig{
- CeSupport: false,
- },
- client: client,
- }
-
- err := d.deleteMapping("000", "17")
- if err != nil {
- t.Error("Test deleteMapping failed")
- }
-}
diff --git a/contrib/drivers/fujitsu/eternus/model.go b/contrib/drivers/fujitsu/eternus/model.go
deleted file mode 100644
index 3332368de..000000000
--- a/contrib/drivers/fujitsu/eternus/model.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package eternus
-
-type System struct {
- Id string `json:"ID"`
- Name string `json:"NAME"`
- Location string `json:"LOCATION"`
- ProductMode string `json:"PRODUCTMODE"`
- Wwn string `json:"wwn"`
-}
-
-// StoragePool is Pool information
-type StoragePool struct {
- Id string
- Name string
- TotalCapacity int64
- FreeCapacity int64
-}
-
-// Volume is Pool information
-type Volume struct {
- Id string
- Name string
- Status string
- Size int64
- TotalCapacity int64
- FreeCapacity int64
- PoolName string
- PoolId string
-}
-
-// IscsiPortInfo is iscsi port info
-type IscsiPortInfo struct {
- PortNumber string
- IscsiName string
- Ip string
- TcpPort int
- IsnsServerIp string
- IsnsServerPort int
-}
-
-// FcPortInfo is iscsi port info
-type FcPortInfo struct {
- PortNumber string
- Wwpn string
-}
-
-// LunGroup is lun group info
-type LunGroup struct {
- Volumes []LunGroupVolume
-}
-
-// LunGroupVolume is lun group info
-type LunGroupVolume struct {
- Id string
- Name string
- RawStatus string
- RoundStatus string
- Size int64
- Uid string
- Lun string
-}
-
-// Mapping
-type Mapping struct {
- Lun string
- VolumeNumber string
- VolumeName string
- VolumeRawStatus string
- VolumeRoundStatus string
- VolumeSize int64
-}
-
-// SnapShot
-type SnapShot struct {
- Sid string
- Gen string
- GenTotal string
- Type string
- VolumeType string
- SrcNo string
- SrcName string
- DestNo string
- DestName string
- Status string
- Phase string
- ErrorCode string
- Requestor string
-}
diff --git a/contrib/drivers/hpe/nimble/client.go b/contrib/drivers/hpe/nimble/client.go
deleted file mode 100755
index 6ba5f87db..000000000
--- a/contrib/drivers/hpe/nimble/client.go
+++ /dev/null
@@ -1,664 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nimble
-
-import (
- "crypto/tls"
- "encoding/json"
- "fmt"
- "net/http"
- "strings"
-
- "github.com/astaxie/beego/httplib"
- log "github.com/golang/glog"
- "github.com/opensds/opensds/contrib/drivers/utils"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
-)
-
-const (
- requestRetryTimes = 1
- FcInitiatorDefaultGrpName = "OsdsFcGrp"
- IscsiInitiatorDefaultGrpName = "OsdsIscsiGrp"
-
- tokenUrlPath = "/tokens"
- poolUrlPath = "/pools"
- poolDetailUrlPath = "/pools/detail"
- volumeUrlPath = "/volumes"
- snapshotUrlPath = "/snapshots"
- initiatorUrlPath = "/initiators"
- initiatorGrpUrlPath = "/initiator_groups"
- accessControlUrlPath = "/access_control_records"
-)
-
-// *****************Original Errors***********************
-func (e *ClinetErrors) Error() string {
- var errStrings []string
- for _, err := range e.Errs {
- errStrings = append(errStrings, fmt.Sprint(err))
- }
- return fmt.Sprint(strings.Join(errStrings, "\n"))
-}
-
-func (e *ArrayInnerErrorBody) Error() string {
- var errStrings []string
- for _, err := range e.Errs {
- errStrings = append(errStrings, fmt.Sprint(err.Error()))
- }
- return fmt.Sprint(strings.Join(errStrings, "\n"))
-}
-
-func (e *ArrayInnerErrorResp) Error() string {
- return fmt.Sprintf("code:%v severity:%v text:%v", e.Code, e.Severity, e.Text)
-}
-
-// *******************************************************
-
-func unset(strings []string, search string) []string {
- result := []string{}
- deleteFlag := false
- for _, v := range strings {
- if v == search && !deleteFlag {
- deleteFlag = true
- continue
- }
- result = append(result, v)
- }
- return result
-}
-
-func NewClient(opt *AuthOptions) (*NimbleClient, error) {
- edp := strings.Split(opt.Endpoints, ",")
- c := &NimbleClient{
- user: opt.Username,
- passwd: opt.Password,
- endpoints: edp,
- insecure: opt.Insecure,
- }
- err := c.login()
- if err != nil {
- return nil, err
- }
-
- return c, nil
-}
-
-func (c *NimbleClient) login() error {
- reqBody := &LoginReqBody{
- Data: LoginReqData{
- Username: c.user,
- Password: c.passwd,
- },
- }
- var tokens []string
- var errs []error
- edp := c.endpoints
-
- for _, ep := range edp {
- url := ep + tokenUrlPath
- auth := &AuthRespBody{}
- token := ""
- log.Infof("%v: trying login to %v", DriverName, ep)
- b, _, err := c.doRequest("POST", url, reqBody, token)
-
- // Basic HTTP Request Error
- if err != nil {
- log.Errorf("%v: login failed.", DriverName)
- c.endpoints = unset(c.endpoints, ep) // Delete invalid endpoint from client
- errs = append(errs, err)
- continue
- }
- json.Unmarshal(b, auth)
- tokens = append(tokens, auth.Data.SessionToken)
- log.Infof("%v: got token from %v", DriverName, url)
- }
-
- c.tokens = tokens // Insert valid tokes into client
-
- if len(errs) != 0 {
- err := &ClinetErrors{errs}
- return err
- }
- return nil
-}
-
-func (c *NimbleClient) doRequest(method, url string, in interface{}, token string) ([]byte, http.Header, error) {
- req := httplib.NewBeegoRequest(url, method)
- req.SetTLSClientConfig(&tls.Config{
- InsecureSkipVerify: c.insecure,
- })
- req.Header("Connection", "keep-alive")
- req.Header("Content-Type", "application/json;charset=utf-8")
- req.Header("X-Auth-Token", token) // Set token
-
- if in != nil {
- body, _ := json.Marshal(in)
- req.Body(body)
-
- }
-
- resp, err := req.Response()
- if err != nil {
- log.Errorf("%v: http request failed, method: %s url: %s error: %v", DriverName, method, url, err)
- return nil, nil, err
- }
-
- b, err := req.Bytes()
- if err != nil {
- log.Errorf("%v: get byte[] from response failed, method: %s url: %s error: %v", DriverName, method, url, err)
- return nil, nil, err
- }
-
- inErr := &ArrayInnerErrorBody{}
- json.Unmarshal(b, inErr)
- if len(inErr.Errs) != 0 {
- log.Errorf("%v: get error Infof from response failed, method: %s url: %s error: %v", DriverName, method, url, inErr)
- return nil, nil, inErr
- }
-
- return b, resp.Header, nil
-}
-
-func (c *NimbleClient) ListStoragePools() ([]StoragePoolRespData, error) {
- resp := &StoragePoolsRespBody{}
- pools := []StoragePoolRespData{}
- var err error
- var errs []error
-
- if len(c.endpoints) == 0 {
- log.Errorf("%v: there are no valid endpoints.", DriverName)
- return nil, fmt.Errorf("%v: cannot get storage pools\n", DriverName)
- }
- for i, ep := range c.endpoints {
- err = c.request("GET", ep+poolDetailUrlPath, nil, resp, c.tokens[i])
- if err != nil {
- errs = append(errs, err)
- continue
- }
- // Set endpoint which belonging to the storage pool
- for j, _ := range resp.Data {
- resp.Data[j].Endpoint = ep
- resp.Data[j].Token = c.tokens[i]
- }
- pools = append(pools, resp.Data...)
- }
-
- if len(errs) != 0 {
- err = &ClinetErrors{errs}
- }
- return pools, err
-}
-
-func (c *NimbleClient) request(method, url string, in, out interface{}, token string) error {
- var b []byte
- var errReq error
- var errs []error
- for i := 0; i < requestRetryTimes; i++ {
- b, _, errReq = c.doRequest(method, url, in, token)
- if errReq == nil {
- json.Unmarshal(b, out)
- log.Infof("%v: got response from %v.", DriverName, url)
- break
- } else {
-
- log.Errorf("%v: url:%s %s body:%+v", DriverName, method, url, in)
-
- // Token expired handling
- if inErr, ok := errReq.(*ArrayInnerErrorBody); ok {
- for j := range inErr.Errs {
- if inErr.Errs[j].Code == ErrorUnauthorizedToServer {
- log.Errorf("%v: auth failure, trying re-login....", DriverName)
- if errLogin := c.login(); errLogin == nil {
- log.Infof("%v: relogin success!!", DriverName)
- break
- } else {
- log.Errorf("%v: relogin failed.", DriverName)
- }
- }
- }
- }
- }
-
- if i == requestRetryTimes-1 {
- log.Errorf("%v: finally, could not get response from %v.", DriverName, url)
- errs = append(errs, errReq)
- }
- }
-
- if len(errs) != 0 {
- err := &ClinetErrors{errs}
- return err
- }
- return nil
-}
-
-func (c *NimbleClient) GetPoolIdByName(poolName string) (string, error) {
- pools, err := c.ListStoragePools()
- if err != nil {
- return "", err
- }
- for _, p := range pools {
- if p.Name == poolName {
- return p.Id, nil
- }
- }
- return "", fmt.Errorf("%v: not found specified pool '%s'\n", DriverName, poolName)
-}
-
-func (c *NimbleClient) GetTokenByPoolId(poolId string) (string, string, error) {
- pools, err := c.ListStoragePools()
- if err != nil {
- return "", "", err
- }
- for _, p := range pools {
- if p.Id == poolId {
- return p.Endpoint, p.Token, nil
- }
- }
- return "", "", fmt.Errorf("%v: not found specified pool '%s'\n", DriverName, poolId)
-}
-
-func (c *NimbleClient) CreateVolume(poolId string, opt *pb.CreateVolumeOpts) (*VolumeRespData, error) {
-
- /* Get endpoint and token for spwcific pool */
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return nil, err
- }
-
- // Parse options from Profile----------------------------------------------
- profileOpt := &model.ProfileSpec{}
- reqOptions := CreateVolumeReqData{}
- if err := json.Unmarshal([]byte(opt.GetProfile()), profileOpt); err != nil {
- return nil, err
- }
-
- options, err := json.Marshal(profileOpt.CustomProperties)
- if err != nil {
- return nil, err
- }
-
- if err := json.Unmarshal(options, &reqOptions); err != nil {
- return nil, err
- }
- // -------------------------------------------------------------------------
-
- reqOptions.Name = opt.GetId()
- reqOptions.PoolId = poolId
- reqOptions.Size = Gib2Mebi(opt.GetSize())
- reqOptions.Description = TruncateDescription(opt.GetDescription())
-
- // Create volume from snapshot
- if opt.GetSnapshotId() != "" {
- log.Infof("%v: try to create volume from snapshot...", DriverName)
- log.Infof("%v: snap id: %v", DriverName, opt.GetSnapshotId())
- storageSnapshotId, err := c.GetStorageSnapshotId(poolId, opt.GetSnapshotId())
- if err != nil {
- return nil, err
- }
- if storageSnapshotId == "" {
- err = fmt.Errorf("%v: there is no such snapshot name on storage => %v\n", DriverName, opt.GetSnapshotId())
- return nil, err
- }
-
- reqOptions.BaseSnapId = storageSnapshotId
- reqOptions.Clone = true
- }
-
- lunResp := &VolumeRespBody{}
- reqBody := &CreateVolumeReqBody{Data: reqOptions}
- err = c.request("POST", ep+volumeUrlPath, reqBody, lunResp, token)
- return &lunResp.Data, err
-}
-
-func (c *NimbleClient) ListVolume(poolId string) (*AllVolumeRespBody, error) {
- /* Get endpoint and token for spwcific pool */
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return nil, err
- }
- respBody := &AllVolumeRespBody{}
- err = c.request("GET", ep+volumeUrlPath+"/detail", nil, respBody, token)
- return respBody, err
-}
-
-func (c *NimbleClient) GetStorageVolumeId(poolId string, volName string) (string, error) {
- storageVolumeId := ""
-
- respBody, err := c.ListVolume(poolId)
- if err != nil {
- return storageVolumeId, err
- }
-
- for _, data := range respBody.Data {
- if data.Name == volName {
- storageVolumeId = data.Id
- return storageVolumeId, nil
- }
- }
- return storageVolumeId, fmt.Errorf("%v: could not get storage volume ID of %v\n", DriverName, volName)
-}
-
-func (c *NimbleClient) GetStorageSnapshotId(poolId string, baseSnapName string) (string, error) {
- storageSnapshotId := ""
-
- // Get all volume names
- volResp, err := c.ListVolume(poolId)
- if err != nil {
- return storageSnapshotId, err
- }
-
- for _, volData := range volResp.Data {
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return storageSnapshotId, err
- }
-
- respBody := &AllSnapshotRespBody{}
- err = c.request("GET", ep+snapshotUrlPath+"?vol_name="+volData.Name, nil, respBody, token)
- if err != nil {
- return storageSnapshotId, err
- }
-
- for _, snapData := range respBody.Data {
- if baseSnapName == snapData.Name {
- storageSnapshotId = snapData.Id
- return storageSnapshotId, nil
- }
- }
-
- }
-
- return storageSnapshotId, err
-}
-
-func (c *NimbleClient) DeleteVolume(poolId string, opt *pb.DeleteVolumeOpts) error {
- lunId := opt.GetMetadata()["LunId"]
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return err
- }
-
- err = c.OfflineVolume(lunId, poolId)
- if err != nil {
- return err
- }
-
- // Nimble storage daes not support delete options. No need to parse values in profile.
-
- err = c.request("DELETE", ep+volumeUrlPath+"/"+lunId, nil, nil, token)
- return err
-}
-
-func (c *NimbleClient) OfflineVolume(id string, poolId string) error {
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return err
- }
-
- reqOptions := OfflineVolumeReqData{
- Online: false,
- }
-
- reqBody := &OfflineVolumeReqBody{Data: reqOptions}
- err = c.request("PUT", ep+volumeUrlPath+"/"+id, reqBody, nil, token)
- return err
-}
-
-func (c *NimbleClient) ExtendVolume(poolId string, opt *pb.ExtendVolumeOpts) (*VolumeRespData, error) {
- lunId := opt.GetMetadata()["LunId"]
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return nil, err
- }
-
- // Parse options from Profile----------------------------------------------
- reqOptions := ExtendVolumeReqData{}
-
- profileOpt := &model.ProfileSpec{}
- if err := json.Unmarshal([]byte(opt.GetProfile()), profileOpt); err != nil {
- return nil, err
- }
-
- options, err := json.Marshal(profileOpt.CustomProperties)
- if err != nil {
- return nil, err
- }
-
- if err := json.Unmarshal(options, &reqOptions); err != nil {
- return nil, err
- }
-
- // -------------------------------------------------------------------------
- reqOptions.Size = Gib2Mebi(opt.GetSize())
- reqBody := &ExtendVolumeReqBody{Data: reqOptions}
- respBody := &ExtendVolumeRespBody{}
- err = c.request("PUT", ep+volumeUrlPath+"/"+lunId, reqBody, respBody, token)
- return &respBody.Data, err
-}
-
-func (c *NimbleClient) CreateSnapshot(poolId string, opt *pb.CreateVolumeSnapshotOpts) (*SnapshotRespData, error) {
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return nil, err
- }
-
- // Parse options from Profile----------------------------------------------
- reqOptions := CreateSnapshotReqData{}
-
- profileOpt := &model.ProfileSpec{}
- if err := json.Unmarshal([]byte(opt.GetProfile()), profileOpt); err != nil {
- return nil, err
- }
-
- options, err := json.Marshal(profileOpt.CustomProperties)
- if err != nil {
- return nil, err
- }
-
- if err := json.Unmarshal(options, &reqOptions); err != nil {
- return nil, err
- }
- // -------------------------------------------------------------------------
-
- reqOptions.Name = opt.GetId()
- reqOptions.VolId = opt.GetMetadata()["LunId"]
- reqOptions.Description = TruncateDescription(opt.GetDescription())
-
- reqBody := &CreateSnapshotReqBody{Data: reqOptions}
- respBody := &SnapshotRespBody{}
- err = c.request("POST", ep+snapshotUrlPath, reqBody, respBody, token)
- return &respBody.Data, err
-}
-
-func (c *NimbleClient) DeleteSnapshot(poolId string, opt *pb.DeleteVolumeSnapshotOpts) error {
- snapId := opt.GetMetadata()["SnapId"]
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return err
- }
- // Nimble storage daes not support delete options. No need to parse values in profile.
- return c.request("DELETE", ep+snapshotUrlPath+"/"+snapId, nil, nil, token)
-}
-
-func (c *NimbleClient) ListInitiator(poolId string, initiatorResp *AllInitiatorRespBody) error {
- // Get endpoint and token for spwcific pool
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return err
- }
-
- err = c.request("GET", ep+initiatorUrlPath+"/detail", nil, initiatorResp, token)
- return err
-}
-
-func (c *NimbleClient) GetStorageInitiatorGrpId(poolId string, initiatorIqn string) (string, error) {
- // List all registered initiators
- respBody := &AllInitiatorRespBody{}
- err := c.ListInitiator(poolId, respBody)
- if err != nil {
- return "", err
- }
- for _, data := range respBody.Data {
- if data.Iqn == initiatorIqn {
- return data.InitiatorGroupId, nil
- }
- }
-
- return "", nil
-}
-
-func (c *NimbleClient) RegisterInitiatorIntoDefaultGrp(poolId string, opt *pb.CreateVolumeAttachmentOpts, initiatorGrpId string) (*InitiatorRespData, error) {
- // Get endpoint and token for spwcific pool
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return nil, err
- }
- reqOptions := CreateInitiatorReqData{}
- reqOptions.IpAddress = opt.GetHostInfo().Ip
- reqOptions.InitiatorGroupId = initiatorGrpId
- // For iSCSI initiator
- if opt.GetAccessProtocol() == ISCSIProtocol {
- reqOptions.AccessProtocol = "iscsi"
- reqOptions.Iqn = utils.GetInitiatorName(opt.GetHostInfo().GetInitiators(), opt.GetAccessProtocol())
- reqOptions.Label = opt.GetId()
- }
- // For FC initiator
- if opt.GetAccessProtocol() == FCProtocol {
- reqOptions.AccessProtocol = "fc"
- reqOptions.Wwpn = utils.GetInitiatorName(opt.GetHostInfo().GetInitiators(), opt.GetAccessProtocol())
- reqOptions.Alias = opt.GetId()
- }
-
- reqBody := &CreateInitiatorReqBody{Data: reqOptions}
- respBody := &InitiatorRespBody{}
- err = c.request("POST", ep+initiatorUrlPath, reqBody, respBody, token)
- return &respBody.Data, err
-}
-
-func (c *NimbleClient) GetDefaultInitiatorGrpId(poolId string, opt *pb.CreateVolumeAttachmentOpts) (string, error) {
-
- respBody := &AllInitiatorGrpRespBody{}
- err := c.ListInitiatorGrp(poolId, respBody)
- if err != nil {
- return "", err
- }
-
- // For iSCSI
- if opt.GetAccessProtocol() == ISCSIProtocol {
- log.Infof("%v: trying to get default iscsi initiator group ID: %v.", DriverName, IscsiInitiatorDefaultGrpName)
- for _, data := range respBody.Data {
- if data.Name == IscsiInitiatorDefaultGrpName {
- return data.Id, nil
- }
- }
- }
-
- // For FC
- if opt.GetAccessProtocol() == FCProtocol {
- log.Infof("%v: trying to get default fc initiator group ID: %v.", DriverName, FcInitiatorDefaultGrpName)
- for _, data := range respBody.Data {
- if data.Name == FcInitiatorDefaultGrpName {
- return data.Id, nil
- }
- }
- }
- return "", nil
-}
-
-func (c *NimbleClient) ListInitiatorGrp(poolId string, initiatorGrpResp *AllInitiatorGrpRespBody) error {
- // Get endpoint and token for spwcific pool
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return err
- }
-
- err = c.request("GET", ep+initiatorGrpUrlPath+"/detail", nil, initiatorGrpResp, token)
- return err
-}
-
-func (c *NimbleClient) CreateInitiatorDefaultGrp(poolId string, opt *pb.CreateVolumeAttachmentOpts) (*InitiatorGrpRespData, error) {
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return nil, err
- }
- reqOptions := CreateInitiatorGrpReqData{}
- // For iSCSI initiator
- if opt.GetAccessProtocol() == ISCSIProtocol {
- reqOptions.Name = IscsiInitiatorDefaultGrpName
- reqOptions.AccessProtocol = "iscsi"
- reqOptions.Description = "OpenSDS default iSCSI group"
- reqOptions.TargetSubnets = append(reqOptions.TargetSubnets, map[string]string{"label": "management"})
- }
- // For FC initiator
- if opt.GetAccessProtocol() == FCProtocol {
- reqOptions.Name = FcInitiatorDefaultGrpName
- reqOptions.AccessProtocol = "fc"
- reqOptions.Description = "OpenSDS default FC group"
- }
-
- reqBody := &CreateInitiatorGrpReqBody{Data: reqOptions}
- respBody := &InitiatorGrpRespBody{}
- err = c.request("POST", ep+initiatorGrpUrlPath, reqBody, respBody, token)
- return &respBody.Data, err
-}
-
-func (c *NimbleClient) AttachVolume(poolId string, volName string, initiatorGrpId string) (*AccessControlRespData, error) {
- storageVolumeId, err := c.GetStorageVolumeId(poolId, volName)
- if err != nil {
- return nil, err
- }
-
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return nil, err
- }
- reqOptions := CreateAccessControlReqData{}
- reqOptions.VolId = storageVolumeId
- reqOptions.InitiatorGroupId = initiatorGrpId
-
- reqBody := &CreateAccessControlReqBody{Data: reqOptions}
- respBody := &AccessControlRespBody{}
- err = c.request("POST", ep+accessControlUrlPath, reqBody, respBody, token)
-
- return &respBody.Data, err
-}
-
-func (c *NimbleClient) DetachVolume(poolId string, storageAceessId string) error {
- // Detach request does not give us response body
- ep, token, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return err
- }
-
- err = c.request("DELETE", ep+accessControlUrlPath+"/"+storageAceessId, nil, nil, token)
- return err
-}
-
-func (c *NimbleClient) GetTargetVolumeInfo(poolId string, volName string) (string, string, error) {
- ep, _, err := c.GetTokenByPoolId(poolId)
- if err != nil {
- return "", "", err
- }
- respBody, err := c.ListVolume(poolId)
- for _, data := range respBody.Data {
- if data.Name == volName {
- return data.TargetName, ep, err
- }
- }
-
- return "", "", fmt.Errorf("%v: couldnot get volume target.\n", DriverName)
-}
diff --git a/contrib/drivers/hpe/nimble/common.go b/contrib/drivers/hpe/nimble/common.go
deleted file mode 100755
index ab03d03db..000000000
--- a/contrib/drivers/hpe/nimble/common.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nimble
-
-import (
- "crypto/md5"
- "encoding/hex"
- "strings"
-)
-
-const UnitGi = 1024 * 1024 * 1024
-const UnitMebi = 1024
-
-func EncodeName(id string) string {
- h := md5.New()
- h.Write([]byte(id))
- encodedName := hex.EncodeToString(h.Sum(nil))
- prefix := strings.Split(id, "-")[0] + "-"
- postfix := encodedName[:MaxNameLength-len(prefix)]
- return prefix + postfix
-}
-
-func TruncateDescription(desc string) string {
- if len(desc) > MaxDescriptionLength {
- desc = desc[:MaxDescriptionLength]
- }
- return desc
-}
-
-func Byte2Gib(byteSize int64) int64 {
- return byteSize / UnitGi
-}
-
-func Gib2Mebi(gigabyte int64) int64 {
- return gigabyte * UnitMebi
-}
diff --git a/contrib/drivers/hpe/nimble/constants.go b/contrib/drivers/hpe/nimble/constants.go
deleted file mode 100755
index de2f81e1c..000000000
--- a/contrib/drivers/hpe/nimble/constants.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nimble
-
-const (
- DriverName = "hpe_nimble"
-)
-
-const (
- ThickLuntype = 0
- ThinLuntype = 1
- MaxNameLength = 31
- MaxDescriptionLength = 170
- PortNumPerContr = 2
- PwdExpired = 3
- PwdReset = 4
-)
-
-// Error Code
-const (
- ErrorUnauthorizedToServer = "SM_http_unauthorized"
- ErrorSmVolSizeDecreased = "SM_vol_size_decreased"
- ErrorSmHttpConflict = "SM_http_conflict"
-)
diff --git a/contrib/drivers/hpe/nimble/model.go b/contrib/drivers/hpe/nimble/model.go
deleted file mode 100755
index 7f84295f9..000000000
--- a/contrib/drivers/hpe/nimble/model.go
+++ /dev/null
@@ -1,478 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nimble
-
-type AuthRespBody struct {
- Data AuthRespData `json:"data"`
-}
-
-type AuthRespData struct {
- AppName string `json:"app_name"`
- CreationTime int `json:"creation_time"`
- Id int `json:"id"`
- LastModified int `json:"last_modified"`
- SessionToken string `json:"session_token"`
- SourceIp string `json:"source_ip"`
- UserName string `json:"username"`
-}
-
-type ClinetErrors struct {
- Errs []error
-}
-
-type ArrayInnerErrorBody struct {
- Errs []ArrayInnerErrorResp `json:"messages"`
-}
-
-type ArrayInnerErrorResp struct {
- Code string `json:"code"`
- Severity string `json:"severity"`
- Text string `json:"text"`
-}
-
-type StoragePoolsRespBody struct {
- StartRow int `json:"startRow"`
- EndRow int `json:"endRow"`
- TotalRows int `json:"totalRows"`
- Data []StoragePoolRespData `json:"data"`
-}
-
-type StoragePoolRespData struct {
- Id string `json:"id"`
- Name string `json:"name"`
- Description string `json:"description"`
- TotalCapacity int64 `json:"capacity"`
- FreeCapacity int64 `json:"free_space"`
- ArrayList []ArrayList `json:"array_list"`
- Endpoint string
- Token string
-}
-
-type ArrayList struct {
- Id string `json:"id"`
- ArrayId string `json:"array_id"`
- Name string `json:"name"`
- ArrayName string `json:"array_name"`
- Usage int64 `json:"usage"`
- UsageValid bool `json:"usage_valid"`
- Migrate string `json:"migrate"`
- EvacUsage int64 `json:"evac_usage"`
- EvacTime int64 `json:"evac_time"`
- SnapUsageCompressedBytes int64 `json:"snap_usage_compressed_bytes"`
- UsableCapacity int64 `json:"usable_capacity"`
- VolUsageCompressedBytes int64 `json:"vol_usage_compressed_bytes"`
-}
-
-type AuthOptions struct {
- Username string `yaml:"username,omitempty"`
- Password string `yaml:"password,omitempty"`
- Endpoints string `yaml:"endpoints,omitempty"`
- Insecure bool `yaml:"insecure,omitempty"`
-}
-
-type NimbleClient struct {
- user string
- passwd string
- endpoints []string
- tokens []string
- insecure bool
-}
-
-type VolumeRespBody struct {
- Data VolumeRespData `json:"data"`
-}
-
-type AllVolumeRespBody struct {
- StartRow int `json:"startRow"`
- EndRow int `json:"endRow"`
- TotalRows int `json:"totalRows"`
- Data []VolumeRespData `json:"data"`
-}
-
-type VolumeRespData struct {
- AgentType string `json:"agent_type"`
- AppCategory string `json:"app_category"`
- AppUuid string `json:"app_uuid"`
- AvgStatsLast5mins interface{} `json:"avg_stats_last_5mins"`
- BaseSnapId string `json:"base_snap_id"`
- BaseSnapName string `json:"base_snap_name"`
- BlockSize int64 `json:"block_size"`
- CacheNeededForPin int64 `json:"cache_needed_for_pin"`
- CachePinned bool `json:"cache_pinned"`
- CachePolicy string `json:"cache_policy"`
- CachingEnabled bool `json:"caching_enabled"`
- CksumLastVerified int64 `json:"cksum_last_verified"`
- Clone bool `json:"clone"`
- ContentReplErrorsFound bool `json:"content_repl_errors_found"`
- CreationTime int64 `json:"creation_time"`
- DedupeEnabled bool `json:"dedupe_enabled"`
- Description string `json:"description"`
- DestPoolId string `json:"dest_pool_id"`
- DestPoolName string `json:"dest_pool_name"`
- EncryptionCipher string `json:"encryption_cipher"`
- FolderId string `json:"folder_id"`
- FolderName string `json:"folder_name"`
- FullName string `json:"full_name"`
- Id string `json:"id"`
- LastContentSnapBrCgUid int64 `json:"last_content_snap_br_cg_uid"`
- LastContentSnapBrGid int64 `json:"last_content_snap_br_gid"`
- LastContentSnapId int64 `json:"last_content_snap_id"`
- LastModified int64 `json:"last_modified"`
- LastReplicatedSnap interface{} `json:"last_replicated_snap"`
- LastSnap interface{} `json:"last_snap"`
- Limit int64 `json:"limit"`
- LimitIops int64 `json:"limit_iops"`
- LimitMbps int64 `json:"limit_mbps"`
- Metadata interface{} `json:"metadata"`
- MoveAborting bool `json:"move_aborting"`
- MoveBytesMigrated int64 `json:"move_bytes_migrated"`
- MoveBytesRemaining int64 `json:"move_bytes_remaining"`
- MoveEstComplTime int64 `json:"move_est_compl_time"`
- MoveStartTime int64 `json:"move_start_time"`
- MultiInitiator bool `json:"multi_initiator"`
- Name string `json:"name"`
- NeedsContentRepl bool `json:"needs_content_repl"`
- NumConnections int64 `json:"num_connections"`
- NumFcConnections int64 `json:"num_fc_connections"`
- NumIscsiConnections int64 `json:"num_iscsi_connections"`
- NumSnaps int64 `json:"num_snaps"`
- OfflineReason interface{} `json:"offline_reason"`
- Online bool `json:"online"`
- OnlineSnaps interface{} `json:"online_snaps"`
- OwnedByGroup string `json:"owned_by_group"`
- OwnedByGroupId string `json:"owned_by_group_id"`
- ParentVolId string `json:"parent_vol_id"`
- ParentVolName string `json:"parent_vol_name"`
- PerfpolicyId string `json:"perfpolicy_id"`
- PerfpolicyName string `json:"perfpolicy_name"`
- PinnedCacheSize int64 `json:"pinned_cache_size"`
- PoolId int64 `json:"pool_id"`
- PoolName string `json:"pool_name"`
- PreviouslyDeduped bool `json:"previously_deduped"`
- ProjectedNumSnaps int64 `json:"projected_num_snaps"`
- ProtectionType string `json:"protection_type"`
- ReadOnly bool `json:"read_only"`
- Reserve int64 `json:"reserve"`
- SearchName string `json:"search_name"`
- SerialNumber string `json:"serial_number"`
- Size int64 `json:"size"`
- SnapLimit int64 `json:"snap_limit"`
- SnapLimitPercent int64 `json:"snap_limit_percent"`
- SnapReserve int64 `json:"snap_reserve"`
- SnapUsageCompressedBytes int64 `json:"snap_usage_compressed_bytes"`
- SnapUsagePopulatedBytes int64 `json:"snap_usage_populated_bytes"`
- SnapUsageUncompressedBytes int64 `json:"snap_usage_uncompressed_bytes"`
- SnapWarnLevel int64 `json:"snap_warn_level"`
- SpaceUsageLevel string `json:"space_usage_level"`
- TargetName string `json:"target_name"`
- ThinlyProvisioned bool `json:"thinly_provisioned"`
- TotalUsageBytes int64 `json:"total_usage_bytes"`
- UpstreamCachePinned bool `json:"upstream_cache_pinned"`
- UsageValid bool `json:"usage_valid"`
- VolState string `json:"vol_state"`
- VolUsageCompressedBytese int64 `json:"vol_usage_compressed_bytes"`
- VolUsageUncompressedBytes int64 `json:"vol_usage_uncompressed_bytes"`
- VolcollId string `json:"volcoll_id"`
- VolcollName string `json:"volcoll_name"`
- VpdIeee0 string `json:"vpd_ieee0"`
- VpdIeee1 string `json:"vpd_ieee1"`
- VpdT10 string `json:"vpd_t10"`
- WarnLevel int64 `json:"warn_level"`
- IscsiSessions interface{} `json:"iscsi_sessions"`
- FcSessions interface{} `json:"fc_sessions"`
- AccessControlRecords interface{} `json:"access_control_records"`
-}
-
-type SnapshotRespBody struct {
- Data SnapshotRespData `json:"data"`
-}
-
-type AllSnapshotRespBody struct {
- StartRow int `json:"startRow"`
- EndRow int `json:"endRow"`
- TotalRows int `json:"totalRows"`
- Data []SnapshotRespData `json:"data"`
-}
-
-type SnapshotRespData struct {
- AccessControlRecords interface{} `json:"access_control_records"`
- AgentType string `json:"agent_type"`
- AppUuid string `json:"app_uuid"`
- CreationTime int64 `json:"creation_time"`
- Description string `json:"description"`
- Id string `json:"id"`
- IsReplica bool `json:"is_replica"`
- IsUnmanaged bool `json:"is_unmanaged"`
- LastModified int64 `json:"last_modified"`
- Metadata interface{} `json:"metadata"`
- Name string `json:"name"`
- NewDataCompressedBytes int64 `json:"new_data_compressed_bytes"`
- NewDataUncompressedBytes string `json:"new_data_uncompressed_bytes"`
- NewDataValid bool `json:"new_data_valid"`
- OfflineReason string `json:"offline_reason"`
- Online bool `json:"online"`
- OriginName string `json:"origin_name"`
- ReplicationStatus interface{} `json:"replication_status"`
- ScheduleId string `json:"schedule_id"`
- ScheduleName string `json:"schedule_name"`
- SerialNumber string `json:"serial_number"`
- Size int64 `json:"size"`
- SnapCollectionId string `json:"snap_collection_id"`
- SnapCollectionName string `json:"snap_collection_name"`
- TargetName string `json:"target_name"`
- VolId string `json:"vol_id"`
- VolName string `json:"vol_name"`
- VpdIeee0 string `json:"vpd_ieee0"`
- VpdIeee1 string `json:"vpd_ieee1"`
- VpdT10 string `json:"vpd_t10"`
- Writable bool `json:"writable"`
-}
-
-type LoginReqBody struct {
- Data LoginReqData `json:"data"`
-}
-type LoginReqData struct {
- Username string `json:"username"`
- Password string `json:"password"`
-}
-
-type CreateVolumeReqBody struct {
- Data CreateVolumeReqData `json:"data"`
-}
-
-type CreateVolumeReqData struct {
- Name string `json:"name"` //not meta
- Size int64 `json:"size"` //not meta
- Description string `json:"description,omitempty"` //not meta
- PerfpolicyId string `json:"perfpolicy_id,omitempty"`
- Reserve int64 `json:"reserve,omitempty"`
- WarnLevel int64 `json:"warn_level,omitempty"`
- Limit int64 `json:"limit,omitempty"`
- SnapReserve int64 `json:"snap_reserve,omitempty"` //not meta
- SnapWarnLevel int64 `json:"snap_warn_level,omitempty"`
- SnapLimit int64 `json:"snap_limit,omitempty"`
- SnapLimitPercent int64 `json:"snap_limit_percent,omitempty"`
- Online *bool `json:"online,omitempty"`
- OwnedByGroupId string `json:"owned_by_group_id,omitempty"`
- MultiInitiator bool `json:"multi_initiator,omitempty"`
- PoolId string `json:"pool_id"` //not meta
- ReadOnly bool `json:"read_only,omitempty"`
- BlockSize int64 `json:"block_size,omitempty"`
- Clone bool `json:"clone,omitempty"`
- BaseSnapId string `json:"base_snap_id,omitempty"` //not meta
- AgentType string `json:"agent_type,omitempty"`
- DestPoolId string `json:"dest_pool_id,omitempty"`
- CachePinned *bool `json:"cache_pinned,omitempty"`
- EncryptionCipher string `json:"encryption_cipher,omitempty"`
- AppUuid string `json:"app_uuid,omitempty"`
- FolderId string `json:"folder_id,omitempty"`
- Metadata interface{} `json:"metadata,omitempty"`
- DedupeEnabled *bool `json:"dedupe_enabled,omitempty"`
- LimitIops int64 `json:"limit_iops,omitempty"`
- LimitMbps int64 `json:"limit_mbps,omitempty"`
-}
-
-type ExtendVolumeReqBody struct {
- Data ExtendVolumeReqData `json:"data"`
-}
-type ExtendVolumeReqData struct {
- Name string `json:"name,omitempty"`
- Size int64 `json:"size"` //not meta
- Description string `json:"description,omitempty"`
- PerfpolicyId string `json:"perfpolicy_id,omitempty"`
- Reserve int64 `json:"reserve,omitempty"`
- WarnLevel int64 `json:"warn_level,omitempty"`
- Limit int64 `json:"limit,omitempty"`
- SnapReserve int64 `json:"snap_reserve,omitempty"` //not meta
- SnapWarnLevel int64 `json:"snap_warn_level,omitempty"`
- SnapLimit int64 `json:"snap_limit,omitempty"`
- SnapLimitPercent int64 `json:"snap_limit_percent,omitempty"`
- Online *bool `json:"online,omitempty"`
- OwnedByGroupId string `json:"owned_by_group_id,omitempty"`
- MultiInitiator bool `json:"multi_initiator,omitempty"`
- ReadOnly bool `json:"read_only,omitempty"`
- BlockSize int64 `json:"block_size,omitempty"`
- VolcollId string `json:"volcoll_id,omitempty"`
- AgentType string `json:"agent_type,omitempty"`
- Force *bool `json:"force,omitempty"`
- CachePinned *bool `json:"cache_pinned,omitempty"`
- AppUuid string `json:"app_uuid,omitempty"`
- FolderId string `json:"folder_id,omitempty"`
- Metadata interface{} `json:"metadata,omitempty"`
- CachingEnabled *bool `json:"caching_enabled,omitempty"`
- DedupeEnabled *bool `json:"dedupe_enabled,omitempty"`
- LimitIops int64 `json:"limit_iops,omitempty"`
- LimitMbps int64 `json:"limit_mbps,omitempty"`
-}
-type ExtendVolumeRespBody struct {
- Data VolumeRespData `json:"data"`
-}
-
-type CreateSnapshotReqBody struct {
- Data CreateSnapshotReqData `json:"data"`
-}
-
-type CreateSnapshotReqData struct {
- Name string `json:"name"`
- Description string `json:"description,omitempty"`
- VolId string `json:"vol_id"`
- Online *bool `json:"online,omitempty"`
- Writable *bool `json:"writable,omitempty"`
- AppUuid string `json:"app_uuid,omitempty"`
- Metadata interface{} `json:"metadata,omitempty"`
- AgentType string `json:"agent_type,omitempty"`
-}
-
-type OfflineVolumeReqBody struct {
- Data OfflineVolumeReqData `json:"data"`
-}
-
-type OfflineVolumeReqData struct {
- Online bool `json:"online"`
- Force bool `json:"force"`
-}
-
-type AllInitiatorRespBody struct {
- StartRow int `json:"startRow"`
- EndRow int `json:"endRow"`
- TotalRows int `json:"totalRows"`
- Data []InitiatorRespData `json:"data"`
-}
-
-type InitiatorRespBody struct {
- Data InitiatorRespData `json:"data"`
-}
-
-type InitiatorRespData struct {
- Id string `json:"id"`
- AccessProtocol string `json:"access_protocol"`
- InitiatorGroupId string `json:"initiator_group_id"`
- InitiatorGroupName string `json:"initiator_group_name"`
- Label string `json:"label"`
- Iqn string `json:"iqn"`
- IpAddress string `json:"ip_address"`
- Alias string `json:"alias"`
- Wwpn string `json:"wwpn"`
- CreationTime int64 `json:"creation_time"`
- LastModified int64 `json:"last_modified"`
-}
-
-type CreateInitiatorReqBody struct {
- Data CreateInitiatorReqData `json:"data"`
-}
-
-type CreateInitiatorReqData struct {
- AccessProtocol string `json:"access_protocol,omitempty"`
- InitiatorGroupId string `json:"initiator_group_id,omitempty"`
- Label string `json:"label,omitempty"`
- Iqn string `json:"iqn,omitempty"`
- IpAddress string `json:"ip_address,omitempty"`
- Alias string `json:"alias,omitempty"`
- Wwpn string `json:"wwpn,omitempty"`
-}
-
-type AllInitiatorGrpRespBody struct {
- StartRow int `json:"startRow"`
- EndRow int `json:"endRow"`
- TotalRows int `json:"totalRows"`
- Data []InitiatorGrpRespData `json:"data"`
-}
-
-type InitiatorGrpRespBody struct {
- Data InitiatorGrpRespData `json:"data"`
-}
-
-type InitiatorGrpRespData struct {
- Id string `json:"id"`
- Name string `json:"name"`
- FullName string `json:"full_name"`
- SearchName string `json:"search_name"`
- Description string `json:"description"`
- AccessProtocol string `json:"access_protocol"`
- HostType string `json:"host_type"`
- TargetSubnets []map[string]string `json:"target_subnets"`
- IscsiInitiators []map[string]string `json:"iscsi_initiators"`
- FcInitiators []map[string]string `json:"fc_initiators"`
- CreationTime int64 `json:"creation_time"`
- LastModified int64 `json:"last_modified"`
- AppUuid string `json:"app_uuid"`
- VolumeCount string `json:"volume_count"`
- VolumeList []map[string]string `json:"volume_list"`
- NumConnections int64 `json:"num_connections"`
-}
-
-type CreateInitiatorGrpReqBody struct {
- Data CreateInitiatorGrpReqData `json:"data"`
-}
-
-type CreateInitiatorGrpReqData struct {
- Name string `json:"name,omitempty"`
- Description string `json:"description,omitempty"`
- AccessProtocol string `json:"access_protocol,omitempty"`
- HostType string `json:"host_type,omitempty"`
- TargetSubnets []map[string]string `json:"target_subnets,omitempty"`
- IscsiInitiators []map[string]string `json:"iscsi_initiators,omitempty"`
- FcInitiators []map[string]string `json:"fc_initiators,omitempty"`
- AppUuid string `json:"app_uuid,omitempty"`
-}
-
-type CreateAccessControlReqBody struct {
- Data CreateAccessControlReqData `json:"data"`
-}
-
-type CreateAccessControlReqData struct {
- ApplyTo string `json:"apply_to,omitempty"`
- ChapUserId string `json:"chap_user_id,omitempty"`
- InitiatorGroupId string `json:"initiator_group_id,omitempty"`
- Lun string `json:"lun,omitempty"`
- VolId string `json:"vol_id,omitempty"`
- PeId string `json:"pe_id,omitempty"`
- SnapId string `json:"snap_id,omitempty"`
- PeIds []string `json:"pe_ids,omitempty"`
-}
-
-type AllAccessControlRespBody struct {
- StartRow int `json:"startRow"`
- EndRow int `json:"endRow"`
- TotalRows int `json:"totalRows"`
- Data []AccessControlRespData `json:"data"`
-}
-
-type AccessControlRespBody struct {
- Data AccessControlRespData `json:"data"`
-}
-
-type AccessControlRespData struct {
- Id string `json:"id"`
- ApplyTo string `json:"apply_to"`
- ChapUserId string `json:"chap_user_id"`
- ChapUserName string `json:"chap_user_name"`
- InitiatorGroupId string `json:"initiator_group_id"`
- InitiatorGroupName string `json:"initiator_group_name"`
- Lun int64 `json:"lun"`
- VolId string `json:"vol_id"`
- VolName string `json:"vol_name"`
- VolAgentType string `json:"vol_agent_type"`
- PeId string `json:"pe_id"`
- PeName string `json:"pe_name"`
- PeLun string `json:"pe_lun"`
- SnapId string `json:"snap_id"`
- SnapName string `json:"snap_name"`
- PeIds []string `json:"pe_ids"`
- SnapLuns []string `json:"snapluns"`
- CreationTime int64 `json:"creation_time"`
- LastModified int64 `json:"last_modified"`
- AccessProtocol string `json:"access_protocol"`
-}
diff --git a/contrib/drivers/hpe/nimble/nimble.go b/contrib/drivers/hpe/nimble/nimble.go
deleted file mode 100755
index b953da4f6..000000000
--- a/contrib/drivers/hpe/nimble/nimble.go
+++ /dev/null
@@ -1,348 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nimble
-
-import (
- "fmt"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/contrib/drivers/utils"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils/config"
- uuid "github.com/satori/go.uuid"
-)
-
-const (
- DefaultConfPath = "/etc/opensds/driver/hpe_nimble.yaml"
- NamePrefix = "opensds"
-)
-
-type Config struct {
- AuthOptions `yaml:"authOptions"`
- Pool map[string]PoolProperties `yaml:"pool,flow"`
-}
-
-type Driver struct {
- conf *Config
- client *NimbleClient
-}
-
-func (d *Driver) Setup() (err error) {
-
- conf := &Config{}
- d.conf = conf
- path := config.CONF.OsdsDock.Backends.HpeNimble.ConfigPath
- if "" == path {
- path = DefaultConfPath
- }
- Parse(conf, path)
-
- d.client, err = NewClient(&d.conf.AuthOptions)
- if err != nil {
- log.Errorf("%v: get new client failed.\n%v", DriverName, err)
- return err
- }
-
- return nil
-}
-
-func (d *Driver) Unset() error {
- return nil
-}
-
-func (d *Driver) CreateVolume(opt *pb.CreateVolumeOpts) (*model.VolumeSpec, error) {
- log.Infof("%v: try to create volume...", DriverName)
-
- poolId, err := d.client.GetPoolIdByName(opt.GetPoolName())
- if err != nil {
- return nil, err
- }
- lun, err := d.client.CreateVolume(poolId, opt)
- if err != nil {
- log.Errorf("%v: create Volume Failed: %v", DriverName, err)
- return nil, err
- }
- log.Infof("%v: create volume success.", DriverName)
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: Byte2Gib(lun.Size),
- Description: lun.Description,
- AvailabilityZone: opt.GetAvailabilityZone(),
- Metadata: map[string]string{
- "Group": lun.OwnedByGroup,
- "Iqn": lun.TargetName,
- "LunId": lun.Id,
- "PoolId": poolId,
- },
- }, nil
-}
-
-func (d *Driver) DeleteVolume(opt *pb.DeleteVolumeOpts) error {
- log.Infof("%v: Trying delete volume ...", DriverName)
- poolId := opt.GetMetadata()["PoolId"]
- err := d.client.DeleteVolume(poolId, opt)
- if err != nil {
- log.Errorf("%v: delete volume failed, volume id =%s , error:%s", DriverName, opt.GetId(), err)
- return err
- }
- log.Infof("%v: remove volume success", DriverName)
- return nil
-}
-
-func (d *Driver) ExtendVolume(opt *pb.ExtendVolumeOpts) (*model.VolumeSpec, error) {
- log.Infof("%v: trying Extend volume...", DriverName)
- poolId := opt.GetMetadata()["PoolId"]
- _, err := d.client.ExtendVolume(poolId, opt)
- if err != nil {
- log.Errorf("%v: extend Volume Failed:", DriverName)
- return nil, err
- }
-
- log.Infof("%v: extend volume success.", DriverName)
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- AvailabilityZone: opt.GetAvailabilityZone(),
- }, nil
-}
-
-func (d *Driver) CreateSnapshot(opt *pb.CreateVolumeSnapshotOpts) (*model.VolumeSnapshotSpec, error) {
- log.Infof("%v: trying create snapshot...", DriverName)
- poolId := opt.GetMetadata()["PoolId"]
- snap, err := d.client.CreateSnapshot(poolId, opt)
- if err != nil {
- return nil, err
- }
-
- log.Infof("%v: create snapshot success.", DriverName)
- return &model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Description: opt.GetDescription(),
- VolumeId: opt.GetVolumeId(),
- Size: 0,
- Metadata: map[string]string{
- "SnapId": snap.Id,
- },
- }, nil
-}
-
-func (d *Driver) DeleteSnapshot(opt *pb.DeleteVolumeSnapshotOpts) error {
- log.Infof("%v: trying delete snapshot...", DriverName)
- poolId := opt.GetMetadata()["PoolId"]
- err := d.client.DeleteSnapshot(poolId, opt)
- if err != nil {
- log.Errorf("%v: delete volume snapshot failed, volume snapshot id = %s , error: %v", DriverName, opt.GetId(), err)
- return err
- }
- log.Infof("%v: remove volume snapshot success, volume snapshot id=%v", DriverName, opt.GetId())
- return nil
-}
-
-func (d *Driver) ListPools() ([]*model.StoragePoolSpec, error) {
- log.Infof("%v: listPools ...", DriverName)
- var pols []*model.StoragePoolSpec
- sp, err := d.client.ListStoragePools()
- if err != nil {
- return nil, err
- }
-
- c := d.conf
-
- for _, pool := range sp {
- for grpName, _ := range c.Pool {
- if grpName == pool.Name && c.Pool[grpName].AvailabilityZone == pool.ArrayList[0].ArrayName {
-
- pol := &model.StoragePoolSpec{
- BaseModel: &model.BaseModel{
- Id: uuid.NewV5(uuid.NamespaceOID, pool.Id).String(),
- },
- Name: pool.Name,
- Description: pool.Description,
- TotalCapacity: Byte2Gib(pool.TotalCapacity),
- FreeCapacity: Byte2Gib(pool.FreeCapacity),
- StorageType: "block",
- AvailabilityZone: pool.ArrayList[0].ArrayName + "/" + pool.Name,
- Extras: c.Pool[grpName].Extras,
- }
- pols = append(pols, pol)
- break
- }
- }
- }
-
- // Error if there is NO valid storage grp
- if len(pols) == 0 {
- return nil, fmt.Errorf("%v: there are no valid storage pool. Pls check driver config.\n", DriverName)
- }
-
- return pols, nil
-}
-
-func (d *Driver) InitializeConnection(opt *pb.CreateVolumeAttachmentOpts) (*model.ConnectionInfo, error) {
-
- if opt.GetAccessProtocol() == ISCSIProtocol || opt.GetAccessProtocol() == FCProtocol {
- log.Infof("%v: trying initialize connection...", DriverName)
-
- poolId := opt.GetMetadata()["PoolId"]
- initiatorName := utils.GetInitiatorName(opt.GetHostInfo().GetInitiators(), opt.GetAccessProtocol())
- if initiatorName == "" || opt.GetHostInfo().Ip == "" {
- if opt.GetAccessProtocol() == ISCSIProtocol {
- return nil, fmt.Errorf("%v: pls set initiator IQN and IP address for %v protocol.\n", DriverName, opt.GetAccessProtocol())
- }
- if opt.GetAccessProtocol() == FCProtocol {
- return nil, fmt.Errorf("%v: pls set initiator WWPN for %v protocol.\n", DriverName, opt.GetAccessProtocol())
- }
- }
-
- storageInitiatorGrpId, err := d.client.GetStorageInitiatorGrpId(poolId, initiatorName)
- if err != nil {
- return nil, err
- }
-
- // If specified initiator is nothing, register new initiator into default group.
- if storageInitiatorGrpId == "" {
- log.Infof("%v: trying to get default initiator group ID.", DriverName)
- storageInitiatorGrpId, err = d.client.GetDefaultInitiatorGrpId(poolId, opt)
- if err != nil {
- return nil, err
- }
-
- // Create default initiator group
- if storageInitiatorGrpId == "" {
- log.Infof("%v: trying to create default initiator group..", DriverName)
- respBody, err := d.client.CreateInitiatorDefaultGrp(poolId, opt)
- if err != nil {
- return nil, err
- }
- storageInitiatorGrpId = respBody.Id
- }
-
- // Register new initiator
- log.Infof("%v: trying to register initiator into default group..", DriverName)
- _, err := d.client.RegisterInitiatorIntoDefaultGrp(poolId, opt, storageInitiatorGrpId)
- if err != nil {
- return nil, err
- }
- }
-
- // Attach Volume
- attachRespBody, err := d.client.AttachVolume(poolId, opt.GetVolumeId(), storageInitiatorGrpId)
- if err != nil {
- return nil, err
- }
-
- // Set storage attachment ID
- // TODO: Im not sure how to save attachment ID which is UUID on side of storage
- // This UUID will be needed when terminate attachment
- opt.Metadata[opt.GetId()] = attachRespBody.Id
-
- // Get Volume Info
- tgtIqnWwn, tgtMgmtIp, err := d.client.GetTargetVolumeInfo(poolId, opt.GetVolumeId())
- if err != nil {
- return nil, err
- }
- if opt.GetAccessProtocol() == ISCSIProtocol {
- log.Infof("%v: attach volume for iSCSI success.", DriverName)
-
- return &model.ConnectionInfo{
- DriverVolumeType: ISCSIProtocol,
- ConnectionData: map[string]interface{}{
- "targetDiscovered": true,
- "targetIQN": []string{tgtIqnWwn},
- "targetPortal": []string{tgtMgmtIp},
- "discard": false,
- "targetLun": attachRespBody.Lun,
- },
- }, nil
- }
- if opt.GetAccessProtocol() == FCProtocol {
- log.Infof("%v: attach volume for FC success.", DriverName)
- return &model.ConnectionInfo{
- DriverVolumeType: FCProtocol,
- ConnectionData: map[string]interface{}{
- "targetDiscovered": true,
- "targetWWNs": []string{tgtIqnWwn},
- "volumeId": opt.GetVolumeId(),
- "description": "hpe",
- "hostName": opt.GetHostInfo().Host,
- "targetLun": attachRespBody.Lun,
- },
- }, nil
- }
-
- }
-
- return nil, fmt.Errorf("%v: Only support FC or iSCSI.\n", DriverName)
-}
-
-func (d *Driver) TerminateConnection(opt *pb.DeleteVolumeAttachmentOpts) error {
- poolId := opt.GetMetadata()["PoolId"]
- err := d.client.DetachVolume(poolId, opt.GetMetadata()[opt.GetId()])
- if err != nil {
- return err
- }
-
- // Delete attach OSDS ID <-> storage attach ID from meta data
- // TODO: Im not sure this way is correct. Should review.
- delete(opt.Metadata, opt.GetId())
- log.Infof("%v: detach volume success.", DriverName)
- return nil
-}
-
-func (d *Driver) CopyVolume(opt *pb.CreateVolumeOpts, srcid, tgtid string) error {
- return &model.NotImplementError{S: "method initializeSnapshotConnection has not been implemented yet."}
-}
-
-func (d *Driver) PullSnapshot(snapIdentifier string) (*model.VolumeSnapshotSpec, error) {
- // Not used, do nothing
- return nil, nil
-}
-func (d *Driver) PullVolume(volIdentifier string) (*model.VolumeSpec, error) {
- // Not used , do nothing
- return nil, nil
-}
-
-// The interfaces blow are optional, so implement it or not depends on you.
-func (d *Driver) InitializeSnapshotConnection(opt *pb.CreateSnapshotAttachmentOpts) (*model.ConnectionInfo, error) {
- return nil, &model.NotImplementError{S: "method initializeSnapshotConnection has not been implemented yet."}
-}
-
-func (d *Driver) TerminateSnapshotConnection(opt *pb.DeleteSnapshotAttachmentOpts) error {
- return &model.NotImplementError{S: "method terminateSnapshotConnection has not been implemented yet."}
-}
-
-func (d *Driver) CreateVolumeGroup(opt *pb.CreateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method createVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) UpdateVolumeGroup(opt *pb.UpdateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method updateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) DeleteVolumeGroup(opt *pb.DeleteVolumeGroupOpts) error {
- return &model.NotImplementError{"method deleteVolumeGroup has not been implemented yet"}
-}
diff --git a/contrib/drivers/huawei/fusionstorage/constants.go b/contrib/drivers/huawei/fusionstorage/constants.go
deleted file mode 100644
index 7309a3299..000000000
--- a/contrib/drivers/huawei/fusionstorage/constants.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package fusionstorage
-
-const (
- BasicURI = "/dsware/service/"
- UnitGiShiftBit = 10
- DefaultAZ = "default"
- NamePrefix = "opensds"
- LunId = "lunId"
- FusionstorageIscsi = "fusionstorage_iscsi"
- InitiatorNotExistErrorCodeVersion6 = "32155103"
- InitiatorNotExistErrorCodeVersion8 = "155103"
- VolumeAlreadyInHostErrorCode = "157001"
- CmdBin = "fsc_cli"
- DefaultConfPath = "/etc/opensds/driver/fusionstorage.yaml"
- ClientVersion6_3 = "6.3"
- ClientVersion8_0 = "8.0"
- MaxRetry = 3
-)
diff --git a/contrib/drivers/huawei/fusionstorage/fsclient.go b/contrib/drivers/huawei/fusionstorage/fsclient.go
deleted file mode 100644
index 04afe33e6..000000000
--- a/contrib/drivers/huawei/fusionstorage/fsclient.go
+++ /dev/null
@@ -1,586 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package fusionstorage
-
-import (
- "bytes"
- "crypto/tls"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "log"
- "net/http"
- "runtime"
- "strconv"
- "strings"
- "time"
-
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils/exec"
- "github.com/opensds/opensds/pkg/utils/pwd"
-)
-
-var CliErrorMap = map[string]string{
- "50000001": "DSware error",
- "50150001": "Receive a duplicate request",
- "50150002": "Command type is not supported",
- "50150003": "Command format is error",
- "50150004": "Lost contact with major VBS",
- "50150005": "Volume does not exist",
- "50150006": "Snapshot does not exist",
- "50150007": "Volume already exists or name exists or name duplicates with a snapshot name",
- "50150008": "The snapshot has already existed",
- "50150009": "VBS space is not enough",
- "50150010": "The node type is error",
- "50150011": "Volume and snapshot number is beyond max",
- "50150012": "VBS is not ready",
- "50150013": "The ref num of node is not 0",
- "50150014": "The volume is not in the pre-deletion state.",
- "50150015": "The storage resource pool is faulty",
- "50150016": "VBS handle queue busy",
- "50150017": "VBS handle request timeout",
- "50150020": "VBS metablock is locked",
- "50150021": "VBS pool dose not exist",
- "50150022": "VBS is not ok",
- "50150023": "VBS pool is not ok",
- "50150024": "VBS dose not exist",
- "50150064": "VBS load SCSI-3 lock pr meta failed",
- "50150100": "The disaster recovery relationship exists",
- "50150101": "The DR relationship does not exist",
- "50150102": "Volume has existed mirror",
- "50150103": "The volume does not have a mirror",
- "50150104": "Incorrect volume status",
- "50150105": "The mirror volume already exists",
-}
-
-func NewCliError(code string) error {
- if msg, ok := CliErrorMap[code]; ok {
- return NewCliErrorBase(msg, code)
- }
- return NewCliErrorBase("CLI execute error", code)
-}
-
-type CliError struct {
- Msg string
- Code string
-}
-
-func (c *CliError) Error() string {
- return fmt.Sprintf("msg: %s, code:%s", c.Msg, c.Code)
-}
-
-func NewCliErrorBase(msg, code string) *CliError {
- return &CliError{Msg: msg, Code: code}
-}
-
-type FsClient struct {
- username string
- password string
- version string
- addess string
- headers map[string]string
- fmIp string
- fsaIp []string
-}
-
-// Command Root exectuer
-var rootExecuter = exec.NewRootExecuter()
-
-func newRestCommon(conf *Config) (*FsClient, error) {
- if conf.Version != ClientVersion6_3 && conf.Version != ClientVersion8_0 {
- return nil, fmt.Errorf("version %s does not support", conf.Version)
- }
-
- if conf.Version == ClientVersion6_3 {
- if len(conf.FmIp) == 0 || len(conf.FsaIp) == 0 {
- return nil, fmt.Errorf("get %s cli failed, FM ip or FSA ip can not be set to empty", ClientVersion6_3)
- }
- err := StartServer()
- if err != nil {
- return nil, fmt.Errorf("get new client failed, %v", err)
- }
- }
-
- var pwdCiphertext = conf.Password
-
- if conf.EnableEncrypted {
- // Decrypte the password
- pwdTool := pwd.NewPwdEncrypter(conf.PwdEncrypter)
- password, err := pwdTool.Decrypter(pwdCiphertext)
- if err != nil {
- return nil, err
- }
- pwdCiphertext = password
- }
-
- client := &FsClient{
- addess: conf.Url,
- username: conf.Username,
- password: pwdCiphertext,
- fmIp: conf.FmIp,
- fsaIp: conf.FsaIp,
- headers: map[string]string{"Content-Type": "application/json;charset=UTF-8"},
- }
-
- var err error
- for i := 1; i <= MaxRetry; i++ {
- log.Printf("try to login the client %d time", i)
- err = client.login()
- if err != nil {
- time.Sleep(5 * time.Second)
- continue
- }
- break
- }
-
- if err != nil {
- return nil, err
- }
-
- return client, nil
-}
-
-func (c *FsClient) getVersion() error {
- url := "rest/version"
- c.headers["Referer"] = c.addess + BasicURI
- content, err := c.request(url, "GET", true, nil)
- if err != nil {
- return fmt.Errorf("failed to get version, %v", err)
- }
-
- var v Version
- err = json.Unmarshal(content, &v)
- if err != nil {
- return fmt.Errorf("failed to unmarshal the result, %v", err)
- }
-
- c.version = v.CurrentVersion
-
- return nil
-}
-
-func (c *FsClient) login() error {
- c.getVersion()
- url := "/sec/login"
- data := map[string]string{"userName": c.username, "password": c.password}
- _, err := c.request(url, "POST", false, data)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (c *FsClient) logout() error {
- url := "/iam/logout"
- _, err := c.request(url, "POST", false, nil)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *FsClient) queryPoolInfo() (*PoolResp, error) {
- url := "/storagePool"
- result, err := c.request(url, "GET", false, nil)
- if err != nil {
- return nil, err
- }
-
- var pools *PoolResp
- if err := json.Unmarshal(result, &pools); err != nil {
- return nil, err
- }
- return pools, nil
-}
-
-func (c *FsClient) createVolume(volName, poolId string, volSize int64) error {
- url := "/volume/create"
- polID, _ := strconv.Atoi(poolId)
- params := map[string]interface{}{"volName": volName, "volSize": volSize, "poolId": polID}
-
- if _, err := c.request(url, "POST", false, params); err != nil {
- return err
- }
- return nil
-}
-
-func (c *FsClient) deleteVolume(volName string) error {
- url := "/volume/delete"
- params := map[string]interface{}{"volNames": []string{volName}}
- _, err := c.request(url, "POST", false, params)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (c *FsClient) attachVolume(volName, manageIp string) error {
- url := "/volume/attach"
- params := map[string]interface{}{"volName": []string{volName}, "ipList": []string{manageIp}}
- _, err := c.request(url, "POST", false, params)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *FsClient) createPort(initiator string) error {
- url := "iscsi/createPort"
- params := map[string]interface{}{"portName": initiator}
- _, err := c.request(url, "POST", true, params)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *FsClient) queryPortInfo(initiator string) error {
- url := "iscsi/queryPortInfo"
- params := map[string]interface{}{"portName": initiator}
- _, err := c.request(url, "POST", true, params)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (c *FsClient) queryHostInfo(hostName string) (bool, error) {
- url := "iscsi/queryAllHost"
- result, err := c.request(url, "GET", true, nil)
- if err != nil {
- return false, err
- }
-
- var hostlist *HostList
-
- if err := json.Unmarshal(result, &hostlist); err != nil {
- return false, err
- }
-
- for _, v := range hostlist.HostList {
- if v.HostName == hostName {
- return true, nil
- }
- }
-
- return false, nil
-}
-
-func (c *FsClient) createHost(hostInfo *pb.HostInfo) error {
- url := "iscsi/createHost"
- params := map[string]interface{}{"hostName": hostInfo.GetHost(), "ipAddress": hostInfo.GetIp()}
- _, err := c.request(url, "POST", true, params)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *FsClient) addPortToHost(hostName, initiator string) error {
- url := "iscsi/addPortToHost"
- params := map[string]interface{}{"hostName": hostName, "portNames": []string{initiator}}
- _, err := c.request(url, "POST", true, params)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *FsClient) queryHostByPort(initiator string) (*PortHostMap, error) {
- url := "iscsi/queryHostByPort"
- params := map[string]interface{}{"portName": []string{initiator}}
- result, err := c.request(url, "POST", true, params)
- if err != nil {
- return nil, err
- }
-
- var portHostmap *PortHostMap
-
- if err := json.Unmarshal(result, &portHostmap); err != nil {
- return nil, err
- }
-
- return portHostmap, nil
-}
-
-func (c *FsClient) addLunsToHost(hostName, lunId string) error {
- url := "iscsi/addLunsToHost"
- params := map[string]interface{}{"hostName": hostName, "lunNames": []string{lunId}}
- _, err := c.request(url, "POST", true, params)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *FsClient) queryHostLunInfo(hostName string) (*HostLunList, error) {
- url := "iscsi/queryHostLunInfo"
- params := map[string]interface{}{"hostName": hostName}
- result, err := c.request(url, "POST", true, params)
- if err != nil {
- return nil, err
- }
-
- var lunList *HostLunList
-
- if err := json.Unmarshal(result, &lunList); err != nil {
- return nil, err
- }
-
- return lunList, nil
-}
-
-func (c *FsClient) queryIscsiPortalVersion6(initiator string) ([]string, error) {
- args := []string{
- "--op", "queryIscsiPortalInfo", "--portName", initiator,
- }
- out, err := c.RunCmd(args...)
- if err != nil {
- return nil, fmt.Errorf("query iscsi portal failed: %v", err)
- }
-
- if len(out) > 0 {
- return out, nil
- }
-
- return nil, fmt.Errorf("the iscsi target portal is empty.")
-}
-
-func (c *FsClient) getDeviceVersion() (*DeviceVersion, error) {
- url := "/version"
- result, err := c.request(url, "Get", false, nil)
- if err != nil {
- return nil, err
- }
-
- var version *DeviceVersion
-
- if err := json.Unmarshal(result, &version); err != nil {
- return nil, err
- }
-
- return version, nil
-}
-
-func (c *FsClient) queryIscsiPortalVersion8() (*IscsiPortal, error) {
- url := "cluster/dswareclient/queryIscsiPortal"
- params := map[string]interface{}{}
- result, err := c.request(url, "Post", true, params)
- if err != nil {
- return nil, err
- }
-
- var iscsiPortals *IscsiPortal
-
- if err := json.Unmarshal(result, &iscsiPortals); err != nil {
- return nil, err
- }
-
- return iscsiPortals, nil
-}
-
-func (c *FsClient) queryHostFromVolume(lunId string) ([]Host, error) {
- url := "iscsi/queryHostFromVolume"
- params := map[string]interface{}{"lunName": lunId}
- out, err := c.request(url, "POST", true, params)
- if err != nil {
- return nil, err
- }
-
- var hostlist *HostList
-
- if err := json.Unmarshal(out, &hostlist); err != nil {
- return nil, err
- }
-
- return hostlist.HostList, nil
-}
-
-func (c *FsClient) deleteLunFromHost(hostName, lunId string) error {
- url := "iscsi/deleteLunFromHost"
- params := map[string]interface{}{"hostName": hostName, "lunNames": []string{lunId}}
- _, err := c.request(url, "POST", true, params)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *FsClient) deletePortFromHost(hostName, initiator string) error {
- url := "iscsi/deletePortFromHost"
- params := map[string]interface{}{"hostName": hostName, "portNames": []string{initiator}}
- _, err := c.request(url, "POST", true, params)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *FsClient) deleteHost(hostName string) error {
- url := "iscsi/deleteHost"
- params := map[string]interface{}{"hostName": hostName}
- _, err := c.request(url, "POST", true, params)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *FsClient) deletePort(initiator string) error {
- url := "iscsi/deletePort"
- params := map[string]interface{}{"portName": initiator}
- _, err := c.request(url, "POST", true, params)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *FsClient) request(url, method string, isGetVersion bool, reqParams interface{}) ([]byte, error) {
- var callUrl string
- if !isGetVersion {
- callUrl = c.addess + BasicURI + c.version + url
- } else {
- callUrl = c.addess + BasicURI + url
- }
-
- // No verify by SSL
- tr := &http.Transport{
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
- }
- // initialize http client
- client := &http.Client{Transport: tr}
-
- var body []byte
- var err error
- if reqParams != nil {
- body, err = json.Marshal(reqParams)
- if err != nil {
- return nil, fmt.Errorf("failed to marshal the request parameters, url is %s, error is %v", callUrl, err)
- }
- }
-
- req, err := http.NewRequest(strings.ToUpper(method), callUrl, bytes.NewBuffer(body))
- if err != nil {
- return nil, fmt.Errorf("failed to initiate the request, url is %s, error is %v", callUrl, err)
- }
-
- // initiate the header
- for k, v := range c.headers {
- req.Header.Set(k, v)
- }
-
- // do the request
- resp, err := client.Do(req)
- if err != nil {
- return nil, fmt.Errorf("process request failed: %v, url is %s", err, callUrl)
- }
- defer resp.Body.Close()
-
- respContent, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, fmt.Errorf("read from response body failed: %v, url is %s", err, callUrl)
- }
-
- if 400 <= resp.StatusCode && resp.StatusCode <= 599 {
- pc, _, line, _ := runtime.Caller(1)
- return nil, fmt.Errorf("return status code is: %s, return content is: %s, error function is: %s, error line is: %s, url is %s",
- strconv.Itoa(resp.StatusCode), string(respContent), runtime.FuncForPC(pc).Name(), strconv.Itoa(line), callUrl)
- }
-
- // Check the error code in the returned content
- var respResult *ResponseResult
- if err := json.Unmarshal(respContent, &respResult); err != nil {
- return nil, err
- }
-
- if respResult.RespCode != 0 {
- return nil, errors.New(string(respContent))
- }
-
- if c.headers["x-auth-token"] == "" && resp.Header != nil && len(resp.Header["X-Auth-Token"]) > 0 {
- c.headers["x-auth-token"] = resp.Header["X-Auth-Token"][0]
- }
-
- return respContent, nil
-}
-
-func StartServer() error {
- _, err := rootExecuter.Run(CmdBin, "--op", "startServer")
- if err != nil {
- return err
- }
- time.Sleep(3 * time.Second)
- return nil
-}
-
-func (c *FsClient) RunCmd(args ...string) ([]string, error) {
- var lines []string
- var result string
-
- args = append(args, "--manage_ip", c.fmIp, "--ip", "")
- for _, ip := range c.fsaIp {
- args[len(args)-1] = ip
- out, _ := rootExecuter.Run(CmdBin, args...)
- lines = strings.Split(strings.TrimSpace(out), "\n")
- if len(lines) > 0 {
- const resultPrefix = "result="
- for _, line := range lines {
- if strings.HasPrefix(line, resultPrefix) {
- result = line[len(resultPrefix):]
- }
- }
- if result == "0" {
- return lines[:len(lines)-1], nil
- }
- }
- }
-
- return nil, NewCliError(result)
-}
-
-func (c *FsClient) extendVolume(name string, newSize int64) error {
- url := "/volume/expand"
- params := map[string]interface{}{"volName": name, "newVolSize": newSize}
- _, err := c.request(url, "POST", false, params)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *FsClient) createSnapshot(snapName, volName string) error {
- url := "/snapshot/create"
- params := map[string]interface{}{"volName": volName, "snapshotName": snapName}
- _, err := c.request(url, "POST", false, params)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *FsClient) deleteSnapshot(snapName string) error {
- url := "/snapshot/delete"
- params := map[string]interface{}{"snapshotName": snapName}
- _, err := c.request(url, "POST", false, params)
- if err != nil {
- return err
- }
- return nil
-}
diff --git a/contrib/drivers/huawei/fusionstorage/fusionstorage.go b/contrib/drivers/huawei/fusionstorage/fusionstorage.go
deleted file mode 100644
index f0b5419b5..000000000
--- a/contrib/drivers/huawei/fusionstorage/fusionstorage.go
+++ /dev/null
@@ -1,523 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package fusionstorage
-
-import (
- "errors"
- "fmt"
- "os"
- "strconv"
- "strings"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/contrib/drivers/utils"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- . "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils/config"
- uuid "github.com/satori/go.uuid"
-)
-
-func (d *Driver) Setup() error {
- conf := &Config{}
-
- d.Conf = conf
-
- path := config.CONF.OsdsDock.Backends.HuaweiFusionStorage.ConfigPath
- if path == "" {
- path = DefaultConfPath
- }
-
- Parse(conf, path)
-
- client, err := newRestCommon(conf)
- if err != nil {
- msg := fmt.Sprintf("get new client failed, %v", err)
- log.Error(msg)
- return errors.New(msg)
- }
-
- d.Client = client
-
- log.Info("get new client success")
- return nil
-}
-
-func (d *Driver) Unset() error {
- if d.Client == nil {
- return errors.New("cannot get client")
- }
- return d.Client.logout()
-}
-
-func EncodeName(id string) string {
- return NamePrefix + "-" + id
-}
-
-func (d *Driver) CreateVolume(opt *pb.CreateVolumeOpts) (*VolumeSpec, error) {
- name := EncodeName(opt.GetId())
- err := d.Client.createVolume(name, opt.GetPoolName(), opt.GetSize()<> UnitGiShiftBit,
- FreeCapacity: (p.TotalCapacity - p.UsedCapacity) >> UnitGiShiftBit,
- ConsumedCapacity: p.UsedCapacity >> UnitGiShiftBit,
- StorageType: c.Pool[poolId].StorageType,
- Extras: c.Pool[poolId].Extras,
- AvailabilityZone: c.Pool[poolId].AvailabilityZone,
- }
- if pol.AvailabilityZone == "" {
- pol.AvailabilityZone = DefaultAZ
- }
- pols = append(pols, pol)
- }
-
- log.Info("list pools success")
- return pols, nil
-}
-
-func (d *Driver) InitializeConnection(opt *pb.CreateVolumeAttachmentOpts) (*ConnectionInfo, error) {
- lunId := opt.GetMetadata()[LunId]
- if lunId == "" {
- msg := "lun id is empty"
- log.Error(msg)
- return nil, fmt.Errorf(msg)
- }
-
- hostInfo := opt.GetHostInfo()
-
- initiator := utils.GetInitiatorName(hostInfo.GetInitiators(), opt.GetAccessProtocol())
- hostName := hostInfo.GetHost()
-
- if initiator == "" || hostName == "" {
- msg := "host name or initiator cannot be empty"
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- // Create port if not exist.
- if err := d.CreatePortIfNotExist(initiator); err != nil {
- log.Error(err.Error())
- return nil, err
- }
-
- // Create host if not exist.
- if err := d.CreateHostIfNotExist(hostInfo); err != nil {
- log.Error(err.Error())
- return nil, err
- }
-
- // Add port to host if port not add to the host
- if err := d.AddPortToHost(initiator, hostName); err != nil {
- log.Error(err.Error())
- return nil, err
- }
-
- // Map volume to host
- err := d.Client.addLunsToHost(hostName, lunId)
- if err != nil && !strings.Contains(err.Error(), VolumeAlreadyInHostErrorCode) {
- msg := fmt.Sprintf("add luns to host failed: %v", err)
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- // Get target lun id
- targetLunId, err := d.GetTgtLunID(hostName, lunId)
- if err != nil {
- log.Error(err.Error())
- return nil, err
- }
-
- targetIQN, targetPortal, err := d.GetTargetPortal(initiator)
- if err != nil {
- msg := fmt.Sprintf("get target portals and iqns failed: %v", err)
- log.Errorf(msg)
- return nil, errors.New(msg)
- }
-
- connInfo := &ConnectionInfo{
- DriverVolumeType: opt.GetAccessProtocol(),
- ConnectionData: map[string]interface{}{
- "target_discovered": true,
- "volumeId": opt.GetVolumeId(),
- "description": "huawei",
- "hostName": hostName,
- "targetLun": targetLunId,
- "connect_type": FusionstorageIscsi,
- "initiator": initiator,
- "targetIQN": targetIQN,
- "targetPortal": targetPortal,
- },
- }
-
- log.Infof("initialize connection success: %v", connInfo)
- return connInfo, nil
-}
-
-func (d *Driver) GetTgtLunID(hostName, sourceLunID string) (int, error) {
- hostLunList, err := d.Client.queryHostLunInfo(hostName)
- if err != nil {
- return -1, fmt.Errorf("query host lun info failed: %v", err)
- }
-
- var targetLunId int
- for _, v := range hostLunList.LunList {
- if v.Name == sourceLunID {
- targetLunId = v.Id
- }
- }
-
- return targetLunId, nil
-}
-
-func (d *Driver) AddPortToHost(initiator, hostName string) error {
- hostPortMap, err := d.Client.queryHostByPort(initiator)
- if err != nil {
- return fmt.Errorf("query host by port failed: %v", err)
- }
-
- h, ok := hostPortMap.PortHostMap[initiator]
- if ok && h[0] != hostName {
- return fmt.Errorf("initiator is already added to another host, host name =%s", h[0])
- }
-
- if !ok {
- err = d.Client.addPortToHost(hostName, initiator)
- if err != nil {
- return fmt.Errorf("add port to host failed: %v", err)
- }
- }
-
- return nil
-}
-
-func (d *Driver) CreateHostIfNotExist(hostInfo *pb.HostInfo) error {
- isFind, err := d.Client.queryHostInfo(hostInfo.GetHost())
- if err != nil {
- return fmt.Errorf("query host info failed: %v", err)
- }
-
- if !isFind {
- err = d.Client.createHost(hostInfo)
- if err != nil {
- return fmt.Errorf("create host failed: %v", err)
- }
- }
-
- return nil
-}
-
-func (d *Driver) CreatePortIfNotExist(initiator string) error {
- err := d.Client.queryPortInfo(initiator)
- if err != nil {
- if strings.Contains(err.Error(), InitiatorNotExistErrorCodeVersion6) ||
- strings.Contains(err.Error(), InitiatorNotExistErrorCodeVersion8) {
- err := d.Client.createPort(initiator)
- if err != nil {
- return fmt.Errorf("create port failed: %v", err)
- }
- } else {
- return fmt.Errorf("query port info failed: %v", err)
- }
- }
- return nil
-}
-
-func (d *Driver) GetTargetPortal(initiator string) ([]string, []string, error) {
- if d.Conf.Version == ClientVersion6_3 {
- return d.GeTgtPortalAndIQNVersion6_3(initiator)
- }
- if d.Conf.Version == ClientVersion8_0 {
- return d.GeTgtPortalAndIQNVersion8_0()
- }
-
- return nil, nil, errors.New("cannot find any target portal and iqn")
-}
-
-func (d *Driver) GeTgtPortalAndIQNVersion6_3(initiator string) ([]string, []string, error) {
- targetPortalInfo, err := d.Client.queryIscsiPortalVersion6(initiator)
- if err != nil {
- msg := fmt.Sprintf("query iscsi portal failed: %v", err)
- log.Error(msg)
- return nil, nil, errors.New(msg)
- }
-
- var targetIQN []string
- var targetPortal []string
-
- for _, v := range targetPortalInfo {
- iscsiTarget := strings.Split(v, ",")
- targetIQN = append(targetIQN, iscsiTarget[1])
- targetPortal = append(targetPortal, iscsiTarget[0])
- }
-
- return targetIQN, targetPortal, nil
-}
-
-func (d *Driver) GeTgtPortalAndIQNVersion8_0() ([]string, []string, error) {
- targetPortalInfo, err := d.Client.queryIscsiPortalVersion8()
- if err != nil {
- msg := fmt.Sprintf("query iscsi portal failed: %v", err)
- log.Error(msg)
- return nil, nil, errors.New(msg)
- }
-
- var targetPortal []string
-
- for _, v := range targetPortalInfo.NodeResultList {
- for _, p := range v.PortalList {
- if p.Status == "active" {
- targetPortal = append(targetPortal, p.IscsiPortal)
- }
- }
- }
-
- if len(targetPortal) == 0 {
- return nil, nil, errors.New("the iscsi target portal is empty")
- }
-
- return nil, targetPortal, nil
-}
-
-func (d *Driver) TerminateConnection(opt *pb.DeleteVolumeAttachmentOpts) error {
- lunId := opt.GetMetadata()[LunId]
- if lunId == "" {
- msg := "lun id is empty."
- log.Error(msg)
- return errors.New(msg)
- }
-
- hostInfo := opt.GetHostInfo()
-
- initiator := utils.GetInitiatorName(hostInfo.GetInitiators(), opt.GetAccessProtocol())
- hostName := hostInfo.GetHost()
-
- if initiator == "" || hostName == "" {
- msg := "host name or initiator is empty."
- log.Error(msg)
- return errors.New(msg)
- }
-
- // Make sure that host is exist.
- if err := d.CheckHostIsExist(hostName); err != nil {
- return err
- }
-
- // Check whether the volume attach to the host
- if err := d.CheckVolAttachToHost(hostName, lunId); err != nil {
- return err
- }
-
- // Remove lun from host
- err := d.Client.deleteLunFromHost(hostName, lunId)
- if err != nil {
- msg := fmt.Sprintf("delete lun from host failed, %v", err)
- log.Error(msg)
- return errors.New(msg)
- }
-
- // Remove initiator and host if there is no lun belong to the host
- hostLunList, err := d.Client.queryHostLunInfo(hostName)
- if err != nil {
- msg := fmt.Sprintf("query host lun info failed, %v", err)
- log.Error(msg)
- return errors.New(msg)
- }
-
- if len(hostLunList.LunList) == 0 {
- d.Client.deletePortFromHost(hostName, initiator)
- d.Client.deleteHost(hostName)
- d.Client.deletePort(initiator)
- }
-
- log.Info("terminate Connection success.")
- return nil
-}
-
-func (d *Driver) CheckVolAttachToHost(hostName, lunId string) error {
- hostLunList, err := d.Client.queryHostLunInfo(hostName)
- if err != nil {
- msg := fmt.Sprintf("query host lun info failed, %v", err)
- log.Error(msg)
- return errors.New(msg)
- }
-
- var lunIsFind = false
- for _, v := range hostLunList.LunList {
- if v.Name == lunId {
- lunIsFind = true
- break
- }
- }
-
- if !lunIsFind {
- msg := fmt.Sprintf("the lun %s is not attach to the host %s", lunId, hostName)
- log.Error(msg)
- return errors.New(msg)
- }
-
- return nil
-}
-
-func (d *Driver) CheckHostIsExist(hostName string) error {
- hostIsFind, err := d.Client.queryHostInfo(hostName)
- if err != nil {
- msg := fmt.Sprintf("query host failed, host name %s, error: %v", hostName, err)
- log.Error(msg)
- return errors.New(msg)
- }
-
- if !hostIsFind {
- msg := fmt.Sprintf("host can not be found, host name =%s", hostName)
- log.Error(msg)
- return errors.New(msg)
- }
- return nil
-}
-
-func (d *Driver) PullVolume(volIdentifier string) (*VolumeSpec, error) {
- // Not used , do nothing
- return nil, nil
-}
-
-func (d *Driver) ExtendVolume(opt *pb.ExtendVolumeOpts) (*VolumeSpec, error) {
- err := d.Client.extendVolume(EncodeName(opt.GetId()), opt.GetSize()< 0 {
- data["vstorename"] = c.vstoreName
- }
-
- c.deviceId = ""
- for _, ep := range c.endpoints {
- url := ep + "/xxxxx/sessions"
- auth := &AuthResp{}
- b, header, err := c.doRequest("POST", url, data)
- if err != nil {
- log.Error("Login failed,", err)
- continue
- }
- json.Unmarshal(b, auth)
- c.iBaseToken = auth.Data.IBaseToken
- if auth.Data.AccountState == PwdReset || auth.Data.AccountState == PwdExpired {
- msg := "Password has expired or must be reset,please change the password."
- log.Error(msg)
- c.logout()
- return errors.New(msg)
- }
- if auth.Data.DeviceId == "" {
- continue
- }
- c.deviceId = auth.Data.DeviceId
- c.urlPrefix = ep + "/" + auth.Data.DeviceId
- // Get the first controller that can be connected, then break
-
- c.cookie = header.Get("set-cookie")
- break
- }
-
- if c.deviceId == "" {
- msg := "Failed to login with all rest URLs"
- log.Error(msg)
- return errors.New(msg)
- }
- return nil
-}
-
-func (c *OceanStorClient) logout() error {
- if c.urlPrefix == "" {
- return nil
- }
- return c.request("DELETE", "/sessions", nil, nil)
-}
-
-func (c *OceanStorClient) CreateVolume(name string, size int64, desc string, poolId string, provPolicy string) (*Lun, error) {
- // default alloc type is thick
- allocType := ThickLunType
- if provPolicy == "Thin" {
- allocType = ThinLunType
- }
- data := map[string]interface{}{
- "NAME": name,
- "CAPACITY": Gb2Sector(size),
- "DESCRIPTION": desc,
- "ALLOCTYPE": allocType,
- "PARENTID": poolId,
- "WRITEPOLICY": 1,
- }
- lun := &LunResp{}
- err := c.request("POST", "/lun", data, lun)
- return &lun.Data, err
-}
-
-func (c *OceanStorClient) CreateLunCopy(name, srcid, tgtid, copyspeed string) (string, error) {
- url := "/luncopy"
- if !utils.Contains(LunCopySpeedTypes, copyspeed) {
- log.Warningf("The copy speed %s is invalid, using Medium Speed instead", copyspeed)
- copyspeed = LunCopySpeedMedium
- }
- data := map[string]interface{}{
- "TYPE": ObjectTypeLunCopy,
- "NAME": name,
- "DESCRIPTION": name,
- "COPYSPEED": copyspeed,
- "LUNCOPYTYPE": "1",
- "SOURCELUN": fmt.Sprintf("INVALID;%s;INVALID;INVALID;INVALID", srcid),
- "TARGETLUN": fmt.Sprintf("INVALID;%s;INVALID;INVALID;INVALID", tgtid),
- }
- lun := &LunResp{}
- err := c.request("POST", url, data, lun)
- if err != nil {
- log.Errorf("Create LunCopy failed :%v", err)
- return "", err
- }
- return lun.Data.Id, err
-}
-func (c *OceanStorClient) GetLunInfo(id string) (*Lun, error) {
- url := "/LUNCOPY/" + id
- lun := &LunResp{}
- err := c.request("GET", url, nil, lun)
- if err != nil {
- return nil, err
- }
- return &lun.Data, nil
-}
-func (c *OceanStorClient) StartLunCopy(luncopyid string) error {
- url := "/LUNCOPY/start"
- data := map[string]interface{}{
- "TYPE": ObjectTypeLunCopy,
- "ID": luncopyid,
- }
- err := c.request("PUT", url, data, nil)
- return err
-}
-
-func (c *OceanStorClient) DeleteLunCopy(luncopyid string) error {
- url := "/LUNCOPY/" + luncopyid
- err := c.request("DELETE", url, nil, nil)
- return err
-}
-
-func (c *OceanStorClient) GetVolume(id string) (*Lun, error) {
- lun := &LunResp{}
- err := c.request("GET", "/lun/"+id, nil, lun)
- if err != nil {
- return nil, err
- }
- return &lun.Data, err
-}
-
-func (c *OceanStorClient) GetVolumeByName(name string) (*Lun, error) {
- lun := &LunResp{}
- err := c.request("GET", "/lun?filter=NAME::"+name, nil, lun)
- if err != nil {
- return nil, err
- }
- return &lun.Data, err
-}
-func (c *OceanStorClient) DeleteVolume(id string) error {
- err := c.request("DELETE", "/lun/"+id, nil, nil)
- // If the lun already doesn't exist, delete command should not return err
- if c.checkErrorCode(err, ErrorLunNotExist) {
- return nil
- }
- return err
-}
-
-// ExtendVolume ...
-func (c *OceanStorClient) ExtendVolume(size int64, id string) error {
- data := map[string]interface{}{
- "CAPACITY": Gb2Sector(size),
- "ID": id,
- }
-
- err := c.request("PUT", "/lun/expand", data, nil)
- return err
-}
-
-func (c *OceanStorClient) CheckLunExist(id, wwn string) bool {
- lun := &LunResp{}
- err := c.request("GET", "/lun/"+id, nil, lun)
- if err != nil {
- return false
- }
- if wwn != "" && lun.Data.Wwn != wwn {
- log.Infof("LUN Id %s with WWN %s does not on the array.", id, wwn)
- return false
- }
- return true
-}
-
-func (c *OceanStorClient) CreateSnapshot(lunId, name, desc string) (*Snapshot, error) {
- data := map[string]interface{}{
- "PARENTTYPE": ObjectTypeLun,
- "PARENTID": lunId,
- "NAME": name,
- "DESCRIPTION": desc,
- }
- snap := &SnapshotResp{}
- err := c.request("POST", "/snapshot", data, snap)
- return &snap.Data, err
-}
-
-func (c *OceanStorClient) GetSnapshot(id string) (*Snapshot, error) {
- snap := &SnapshotResp{}
- err := c.request("GET", "/snapshot/"+id, nil, snap)
- return &snap.Data, err
-}
-
-func (c *OceanStorClient) GetSnapshotByName(name string) (*Snapshot, error) {
- snap := &SnapshotsResp{}
- err := c.request("GET", "/snapshot?filter=NAME::"+name, nil, snap)
- return &snap.Data[0], err
-}
-
-func (c *OceanStorClient) DeleteSnapshot(id string) error {
- return c.request("DELETE", "/snapshot/"+id, nil, nil)
-}
-
-func (c *OceanStorClient) ListStoragePools() ([]StoragePool, error) {
- pools := &StoragePoolsResp{}
- err := c.request("GET", "/storagepool?range=[0-100]", nil, pools)
- return pools.Data, err
-}
-
-func (c *OceanStorClient) ListAllStoragePools() ([]StoragePool, error) {
- pools := &StoragePoolsResp{}
- err := c.request("GET", "/storagepool", nil, pools)
- return pools.Data, err
-}
-
-func (c *OceanStorClient) GetPoolIdByName(poolName string) (string, error) {
- pools, err := c.ListAllStoragePools()
- if err != nil {
- return "", err
- }
- for _, p := range pools {
- if p.Name == poolName {
- return p.Id, nil
- }
- }
- return "", fmt.Errorf("not found specified pool '%s'", poolName)
-}
-
-func (c *OceanStorClient) AddHostWithCheck(hostInfo *pb.HostInfo) (string, error) {
- hostName := EncodeHostName(hostInfo.Host)
-
- hostId, _ := c.GetHostIdByName(hostName)
- if hostId != "" {
- return hostId, nil
- }
-
- reqBody := map[string]interface{}{
- "NAME": hostName,
- "OPERATIONSYSTEM": 0, /*linux*/
- "IP": hostInfo.Ip,
- }
- hostResp := &HostResp{}
-
- if err := c.request("POST", "/host", reqBody, hostResp); err != nil {
- if c.checkErrorCode(err, ErrorObjectNameAlreadyExist) {
- return c.GetHostIdByName(hostName)
- }
-
- log.Errorf("Create host failed, host name: %s, error: %v", hostName, err)
- return "", err
- }
-
- if hostResp.Data.Id != "" {
- return hostResp.Data.Id, nil
- }
-
- log.Errorf("Create host failed by host name: %s, error code:%d, description:%s",
- hostInfo.Host, hostResp.Error.Code, hostResp.Error.Description)
- return "", fmt.Errorf("Create host failed by host name: %s, error code:%d, description:%s",
- hostInfo.Host, hostResp.Error.Code, hostResp.Error.Description)
-}
-
-func (c *OceanStorClient) GetHostIdByName(hostName string) (string, error) {
- hostName = EncodeHostName(hostName)
- hostsResp := &HostsResp{}
-
- if err := c.request("GET", "/host?filter=NAME::"+hostName, nil, hostsResp); err != nil {
- log.Errorf("Get host failed by host name: %s, error: %v", hostName, err)
- return "", err
- }
-
- if len(hostsResp.Data) > 0 {
- return hostsResp.Data[0].Id, nil
- }
-
- log.Errorf("Get host failed by host name: %s, error code:%d, description:%s",
- hostName, hostsResp.Error.Code, hostsResp.Error.Description)
- return "", &NotFoundError{name: hostName}
-}
-
-func (c *OceanStorClient) AddInitiatorToHostWithCheck(hostId, initiatorName string) error {
-
- if !c.IsArrayContainInitiator(initiatorName) {
- if err := c.AddInitiatorToArray(initiatorName); err != nil {
- return err
- }
- }
- if !c.IsHostContainInitiator(hostId, initiatorName) {
- if err := c.AddInitiatorToHost(hostId, initiatorName); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (c *OceanStorClient) IsArrayContainInitiator(initiatorName string) bool {
- initiatorResp := &InitiatorResp{}
-
- if err := c.request("GET", "/iscsi_initiator/"+initiatorName, nil, initiatorResp); err != nil {
- log.Errorf("Get iscsi initiator failed by initiator name: %s, error: %v", initiatorName, err)
- return false
- }
-
- if initiatorResp.Data.Id == "" {
- log.Infof("Array does not contains the initiator: %s", initiatorName)
- return false
- }
-
- log.Infof("Array contains the initiator: %s", initiatorName)
- return true
-}
-
-func (c *OceanStorClient) IsHostContainInitiator(hostId, initiatorName string) bool {
- initiatorsResp := &InitiatorsResp{}
-
- if err := c.request("GET", "/iscsi_initiator?ISFREE=false&PARENTID="+hostId, nil, initiatorsResp); err != nil {
- log.Errorf("Get iscsi initiator failed by host id: %s, initiator name: %s, error: %v", hostId, initiatorName, err)
- return false
- }
-
- for _, initiator := range initiatorsResp.Data {
- if initiator.Id == initiatorName {
- log.Infof("Host:%s contains the initiator: %s", hostId, initiatorName)
- return true
- }
- }
-
- log.Infof("Host:%s does not contains the initiator: %s", hostId, initiatorName)
- return false
-}
-
-func (c *OceanStorClient) AddInitiatorToArray(initiatorName string) error {
-
- reqBody := map[string]interface{}{
- "ID": initiatorName,
- }
- initiatorResp := &InitiatorResp{}
-
- if err := c.request("POST", "/iscsi_initiator", reqBody, initiatorResp); err != nil {
- log.Errorf("Create iscsi initiator failed, initiator name: %s, error: %v", initiatorName, err)
- return err
- }
-
- if initiatorResp.Error.Code != 0 {
- log.Errorf("Add iscsi initiator to array failed, error code:%d, description:%s",
- initiatorResp.Error.Code, initiatorResp.Error.Description)
- return fmt.Errorf("code: %d, description: %s",
- initiatorResp.Error.Code, initiatorResp.Error.Description)
- }
-
- log.Infof("Create the initiator: %s successfully.", initiatorName)
- return nil
-}
-
-func (c *OceanStorClient) AddInitiatorToHost(hostId, initiatorName string) error {
-
- reqBody := map[string]interface{}{
- "ID": initiatorName,
- "PARENTID": hostId,
- }
- initiatorResp := &InitiatorResp{}
-
- if err := c.request("PUT", "/iscsi_initiator/"+initiatorName, reqBody, initiatorResp); err != nil {
- log.Errorf("Modify iscsi initiator failed, initiator name: %s, error: %v", initiatorName, err)
- return err
- }
-
- if initiatorResp.Error.Code != 0 {
- log.Errorf("Add iscsi initiator to host failed, error code:%d, description:%s",
- initiatorResp.Error.Code, initiatorResp.Error.Description)
- return fmt.Errorf("code: %d, description: %s",
- initiatorResp.Error.Code, initiatorResp.Error.Description)
- }
-
- log.Infof("Add the initiator: %s to host: %s successfully.", initiatorName, hostId)
- return nil
-}
-
-func (c *OceanStorClient) AddHostToHostGroup(hostId string) (string, error) {
- hostGrpName := PrefixHostGroup + hostId
- hostGrpId, err := c.CreateHostGroupWithCheck(hostGrpName)
- if err != nil {
- log.Errorf("Create host group witch check failed, host group id: %s, error: %v", hostGrpId, err)
- return "", err
- }
-
- contained := c.IsHostGroupContainHost(hostGrpId, hostId)
- if contained {
- return hostGrpId, nil
- }
-
- err = c.AssociateHostToHostGroup(hostGrpId, hostId)
- if err != nil {
- log.Errorf("Associate host to host group failed, host group id: %s, host id: %s, error: %v", hostGrpId, hostId, err)
- return "", err
- }
-
- return hostGrpId, nil
-}
-
-func (c *OceanStorClient) CreateHostGroupWithCheck(hostGrpName string) (string, error) {
- hostGrpId, _ := c.FindHostGroup(hostGrpName)
- if hostGrpId != "" {
- return hostGrpId, nil
- }
-
- hostGrpId, err := c.CreateHostGroup(hostGrpName)
- if err != nil {
- log.Errorf("Create host group with name: %s failed, error: %v", hostGrpName, err)
- return "", err
- }
-
- return hostGrpId, nil
-}
-
-func (c *OceanStorClient) FindHostGroup(groupName string) (string, error) {
- hostGrpsResp := &HostGroupsResp{}
-
- if err := c.request("GET", "/hostgroup?filter=NAME::"+groupName, nil, hostGrpsResp); err != nil {
- log.Errorf("Get host groups failed by filter name: %s, error: %v", groupName, err)
- return "", err
- }
-
- if hostGrpsResp.Error.Code != 0 {
- log.Errorf("Get host groups failed by filter name: %s, error code:%d, description:%s",
- groupName, hostGrpsResp.Error.Code, hostGrpsResp.Error.Description)
- return "", fmt.Errorf("code: %d, description: %s",
- hostGrpsResp.Error.Code, hostGrpsResp.Error.Description)
- }
-
- if len(hostGrpsResp.Data) == 0 {
- log.Infof("No host group with name %s was found.", groupName)
- return "", &NotFoundError{name: groupName}
- }
-
- return hostGrpsResp.Data[0].Id, nil
-}
-
-func (c *OceanStorClient) CreateHostGroup(groupName string) (string, error) {
- reqBody := map[string]interface{}{
- "NAME": groupName,
- }
-
- hostGrpResp := &HostGroupResp{}
-
- if err := c.request("POST", "/hostgroup", reqBody, hostGrpResp); err != nil {
- if c.checkErrorCode(err, ErrorObjectNameAlreadyExist) {
- return c.FindHostGroup(groupName)
- }
-
- log.Errorf("Create host group failed, group name: %s, error: %v", groupName, err)
- return "", err
- }
-
- if hostGrpResp.Error.Code != 0 {
- log.Errorf("Create host group failed, group name: %s, error code:%d, description:%s",
- groupName, hostGrpResp.Error.Code, hostGrpResp.Error.Description)
- return "", fmt.Errorf("code: %d, description: %s",
- hostGrpResp.Error.Code, hostGrpResp.Error.Description)
- }
-
- return hostGrpResp.Data.Id, nil
-}
-
-func (c *OceanStorClient) IsHostGroupContainHost(hostGrpId, hostId string) bool {
- hostsResp := &HostsResp{}
-
- if err := c.request("GET", "/host/associate?ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID="+hostGrpId, nil, hostsResp); err != nil {
- log.Errorf("List hosts failed by parent id: %s, error: %v", hostGrpId, err)
- return false
- }
-
- for _, host := range hostsResp.Data {
- if host.Id == hostId {
- log.Infof("HostGroup: %s contains the host: %s", hostGrpId, hostId)
- return true
- }
- }
-
- log.Infof("HostGroup: %s does not contain the host: %s", hostGrpId, hostId)
- return false
-}
-
-func (c *OceanStorClient) AssociateHostToHostGroup(hostGrpId, hostId string) error {
- reqBody := map[string]interface{}{
- "ID": hostGrpId,
- "ASSOCIATEOBJTYPE": ObjectTypeHost,
- "ASSOCIATEOBJID": hostId,
- }
-
- resp := &GenericResult{}
-
- if err := c.request("POST", "/hostgroup/associate", reqBody, resp); err != nil {
- if c.checkErrorCode(err, ErrorHostAlreadyInHostGroup) {
- return nil
- }
-
- log.Errorf("Associate host:%s to host group:%s failed, error: %v", hostId, hostGrpId, err)
- return err
- }
-
- if resp.Error.Code != 0 {
- log.Errorf("Associate host:%s to host group:%s failed, error code:%d, description:%s",
- hostId, hostGrpId, resp.Error.Code, resp.Error.Description)
- return fmt.Errorf("code: %d, description: %s",
- resp.Error.Code, resp.Error.Description)
- }
-
- return nil
-}
-
-func (c *OceanStorClient) DoMapping(lunId, hostGrpId, hostId string) error {
- var err error
-
- // Find or create lun group and add lun into lun group.
- lunGrpName := PrefixLunGroup + hostId
- lunGrpId, _ := c.FindLunGroup(lunGrpName)
- if lunGrpId == "" {
- lunGrpId, err = c.CreateLunGroup(lunGrpName)
- if err != nil {
- log.Errorf("Create lun group failed, group name:%s, error: %v", lunGrpName, err)
- return err
- }
- }
- if !c.IsLunGroupContainLun(lunGrpId, lunId) {
- if err := c.AssociateLunToLunGroup(lunGrpId, lunId); err != nil {
- log.Errorf("Associate lun to lun group failed, group id:%s, lun id:%s, error: %v", lunGrpId, lunId, err)
- return err
- }
- }
-
- // Find or create mapping view
- mappingViewName := PrefixMappingView + hostId
- mappingViewId, _ := c.FindMappingView(mappingViewName)
- if mappingViewId == "" {
- mappingViewId, err = c.CreateMappingView(mappingViewName)
- if err != nil {
- log.Errorf("Create mapping view failed, view name:%s, error: %v", mappingViewName, err)
- return err
- }
- }
-
- // Associate host group and lun group to mapping view.
- if !c.IsMappingViewContainHostGroup(mappingViewId, hostGrpId) {
- if err := c.AssocateHostGroupToMappingView(mappingViewId, hostGrpId); err != nil {
- log.Errorf("Assocate host group to mapping view failed, view id:%s, host group id:%s, error: %v",
- mappingViewId, hostGrpId, err)
- return err
- }
- }
- if !c.IsMappingViewContainLunGroup(mappingViewId, lunGrpId) {
- if err := c.AssocateLunGroupToMappingView(mappingViewId, lunGrpId); err != nil {
- log.Errorf("Assocate lun group to mapping view failed, view id:%s, lun group id:%s, error: %v",
- mappingViewId, lunGrpId, err)
- return err
- }
- }
-
- log.Infof("DoMapping successfully, with params lunId:%s, hostGrpId:%s, hostId:%s",
- lunId, lunGrpId, hostId)
- return nil
-}
-
-func (c *OceanStorClient) FindLunGroup(groupName string) (string, error) {
- lunGrpsResp := &LunGroupsResp{}
-
- if err := c.request("GET", "/lungroup?filter=NAME::"+groupName, nil, lunGrpsResp); err != nil {
- log.Errorf("Get lun groups failed by filter name: %s, error: %v", groupName, err)
- return "", err
- }
-
- if lunGrpsResp.Error.Code != 0 {
- log.Errorf("Get lun groups failed by filter name: %s, error code:%d, description:%s",
- groupName, lunGrpsResp.Error.Code, lunGrpsResp.Error.Description)
- return "", fmt.Errorf("code: %d, description: %s",
- lunGrpsResp.Error.Code, lunGrpsResp.Error.Description)
- }
-
- if len(lunGrpsResp.Data) == 0 {
- log.Infof("No lun group with name %s was found.", groupName)
- return "", &NotFoundError{name: groupName}
- }
-
- return lunGrpsResp.Data[0].Id, nil
-}
-
-func (c *OceanStorClient) FindMappingView(name string) (string, error) {
-
- mvsResp := &MappingViewsResp{}
-
- if err := c.request("GET", "/mappingview?filter=NAME::"+name, nil, mvsResp); err != nil {
- log.Errorf("Get mapping views failed by filter name: %s, error: %v", name, err)
- return "", err
- }
-
- if mvsResp.Error.Code != 0 {
- log.Errorf("Get mapping views failed by filter name: %s, error code:%d, description:%s",
- name, mvsResp.Error.Code, mvsResp.Error.Description)
- return "", fmt.Errorf("code: %d, description: %s",
- mvsResp.Error.Code, mvsResp.Error.Description)
- }
-
- if len(mvsResp.Data) == 0 {
- log.Infof("No mapping view with name %s was found.", name)
- return "", &NotFoundError{name: name}
- }
-
- return mvsResp.Data[0].Id, nil
-}
-
-func (c *OceanStorClient) CreateLunGroup(groupName string) (string, error) {
- reqBody := map[string]interface{}{
- "NAME": groupName,
- "APPTYPE": 0,
- "GROUPTYPE": 0,
- }
-
- lunGrpResp := &LunGroupResp{}
-
- if err := c.request("POST", "/lungroup", reqBody, lunGrpResp); err != nil {
- if c.checkErrorCode(err, ErrorObjectNameAlreadyExist) {
- return c.FindLunGroup(groupName)
- }
-
- log.Errorf("Create lun group failed, group name: %s, error: %v", groupName, err)
- return "", err
- }
-
- if lunGrpResp.Error.Code != 0 {
- log.Errorf("Create lun group failed, group name: %s, error code:%d, description:%s",
- groupName, lunGrpResp.Error.Code, lunGrpResp.Error.Description)
- return "", fmt.Errorf("code: %d, description: %s",
- lunGrpResp.Error.Code, lunGrpResp.Error.Description)
- }
-
- return lunGrpResp.Data.Id, nil
-}
-
-func (c *OceanStorClient) CreateMappingView(name string) (string, error) {
- reqBody := map[string]interface{}{
- "NAME": name,
- }
-
- mvResp := &MappingViewResp{}
-
- if err := c.request("POST", "/mappingview", reqBody, mvResp); err != nil {
- if c.checkErrorCode(err, ErrorObjectNameAlreadyExist) {
- return c.FindMappingView(name)
- }
-
- log.Errorf("Create mapping view failed, view name: %s, error: %v", name, err)
- return "", err
- }
-
- if mvResp.Error.Code != 0 {
- log.Errorf("Create mapping view failed, view name: %s, error code:%d, description:%s",
- name, mvResp.Error.Code, mvResp.Error.Description)
- return "", fmt.Errorf("code: %d, description: %s",
- mvResp.Error.Code, mvResp.Error.Description)
- }
-
- return mvResp.Data.Id, nil
-}
-
-func (c *OceanStorClient) IsLunGroupContainLun(lunGrpId, lunId string) bool {
- lunsResp := &LunsResp{}
-
- if err := c.request("GET", "/lun/associate?ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID="+lunGrpId, nil, lunsResp); err != nil {
- log.Errorf("List luns failed by lun group id: %s, error: %v", lunGrpId, err)
- return false
- }
-
- for _, lun := range lunsResp.Data {
- if lun.Id == lunId {
- log.Infof("LunGroup: %s contains the lun: %s", lunGrpId, lunId)
- return true
- }
- }
-
- log.Infof("LunGroup: %s does not contain the lun: %s", lunGrpId, lunId)
- return false
-}
-
-func (c *OceanStorClient) AssociateLunToLunGroup(lunGrpId, lunId string) error {
- reqBody := map[string]interface{}{
- "ID": lunGrpId,
- "ASSOCIATEOBJTYPE": ObjectTypeLun,
- "ASSOCIATEOBJID": lunId,
- }
-
- resp := &GenericResult{}
-
- if err := c.request("POST", "/lungroup/associate", reqBody, resp); err != nil {
- if c.checkErrorCode(err, ErrorObjectIDNotUnique) {
- return nil
- }
-
- log.Errorf("Associate lun:%s to lun group:%s failed, error: %v", lunId, lunGrpId, err)
- return err
- }
-
- if resp.Error.Code != 0 {
- log.Errorf("Associate lun:%s to lun group:%s failed, error code:%d, description:%s",
- lunId, lunGrpId, resp.Error.Code, resp.Error.Description)
- return fmt.Errorf("code: %d, description: %s",
- resp.Error.Code, resp.Error.Description)
- }
-
- return nil
-}
-
-func (c *OceanStorClient) IsMappingViewContainHostGroup(viewId, groupId string) bool {
- mvsResp := &MappingViewsResp{}
- if err := c.request("GET", "/mappingview/associate?ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID="+groupId, nil, mvsResp); err != nil {
- log.Errorf("List mapping views failed by host group id: %s, error: %v", groupId, err)
- return false
- }
-
- for _, view := range mvsResp.Data {
- if view.Id == viewId {
- log.Infof("Mapping view: %s contains the host group: %s", viewId, groupId)
- return true
- }
- }
-
- log.Infof("Mapping view: %s does not contain the host group: %s", viewId, groupId)
- return false
-}
-
-func (c *OceanStorClient) AssocateHostGroupToMappingView(viewId, groupId string) error {
- reqBody := map[string]interface{}{
- "ID": viewId,
- "ASSOCIATEOBJTYPE": ObjectTypeHostGroup,
- "ASSOCIATEOBJID": groupId,
- }
-
- resp := &GenericResult{}
-
- if err := c.request("PUT", "/mappingview/create_associate", reqBody, resp); err != nil {
- if c.checkErrorCode(err, ErrorHostGroupAlreadyInMappingView) {
- return nil
- }
-
- log.Errorf("Associate host group:%s to mapping view:%s failed, error: %v", groupId, viewId, err)
- return err
- }
-
- if resp.Error.Code != 0 {
- log.Errorf("Associate host group:%s to mapping view:%s failed, error code:%d, description:%s",
- groupId, viewId, resp.Error.Code, resp.Error.Description)
- return fmt.Errorf("code: %d, description: %s",
- resp.Error.Code, resp.Error.Description)
- }
-
- return nil
-}
-
-func (c *OceanStorClient) IsMappingViewContainLunGroup(viewId, groupId string) bool {
- mvsResp := &MappingViewsResp{}
- if err := c.request("GET", "/mappingview/associate?ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID="+groupId, nil, mvsResp); err != nil {
- log.Errorf("List mapping views failed by lun group id: %s, error: %v", groupId, err)
- return false
- }
-
- for _, view := range mvsResp.Data {
- if view.Id == viewId {
- log.Infof("Mapping view: %s contains the lun group: %s", viewId, groupId)
- return true
- }
- }
-
- log.Infof("Mapping view: %s does not contain the lun group: %s", viewId, groupId)
- return false
-}
-
-func (c *OceanStorClient) AssocateLunGroupToMappingView(viewId, groupId string) error {
- reqBody := map[string]interface{}{
- "ID": viewId,
- "ASSOCIATEOBJTYPE": ObjectTypeLunGroup,
- "ASSOCIATEOBJID": groupId,
- }
-
- resp := &GenericResult{}
-
- if err := c.request("PUT", "/mappingview/create_associate", reqBody, resp); err != nil {
- if c.checkErrorCode(err, ErrorLunGroupAlreadyInMappingView) {
- return nil
- }
-
- log.Errorf("Associate lun group:%s to mapping view:%s failed, error: %v", groupId, viewId, err)
- return err
- }
-
- if resp.Error.Code != 0 {
- log.Errorf("Associate lun group:%s to mapping view:%s failed, error code:%d, description:%s",
- groupId, viewId, resp.Error.Code, resp.Error.Description)
- return fmt.Errorf("code: %d, description: %s",
- resp.Error.Code, resp.Error.Description)
- }
-
- return nil
-}
-
-func (c *OceanStorClient) ListTgtPort() (*IscsiTgtPortsResp, error) {
- resp := &IscsiTgtPortsResp{}
- if err := c.request("GET", "/iscsi_tgt_port", nil, resp); err != nil {
- log.Errorf("Get tgt port failed, error: %v", err)
- return nil, err
- }
-
- if resp.Error.Code != 0 {
- log.Errorf("Get tgt port failed, error code:%d, description:%s",
- resp.Error.Code, resp.Error.Description)
- return nil, fmt.Errorf("code: %d, description: %s", resp.Error.Code, resp.Error.Description)
- }
- return resp, nil
-}
-
-func (c *OceanStorClient) ListHostAssociateLuns(hostId string) (*HostAssociateLunsResp, error) {
- resp := &HostAssociateLunsResp{}
- url := fmt.Sprintf("/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%s", hostId)
- if err := c.request("GET", url, nil, resp); err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *OceanStorClient) GetHostLunId(hostId, lunId string) (int, error) {
- resp, err := c.ListHostAssociateLuns(hostId)
- if err != nil {
- return -1, err
- }
-
- type Metadata struct {
- HostLunId int `json:"HostLUNID"`
- }
-
- for _, lun := range resp.Data {
- if lun.Id != lunId {
- continue
- }
- md := &Metadata{}
- if err := json.Unmarshal([]byte(lun.AssociateMetadata), md); err != nil {
- log.Error("Decoding json error,", err)
- return -1, err
- }
- return md.HostLunId, nil
- }
-
- msg := fmt.Sprintf("cannot find the host lun id of lun %s", lunId)
- log.Info(msg)
- return -1, errors.New(msg)
-}
-
-func (c *OceanStorClient) RemoveLunFromLunGroup(lunGrpId, lunId string) error {
- url := fmt.Sprintf("/lungroup/associate?ID=%s&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=%s", lunGrpId, lunId)
- if err := c.request("DELETE", url, nil, nil); err != nil {
- log.Errorf("Remove lun %s from lun group %s failed, %v", lunId, lunGrpId, err)
- return err
- }
- log.Infof("Remove lun %s from lun group %s success", lunId, lunGrpId)
- return nil
-}
-
-func (c *OceanStorClient) RemoveLunGroupFromMappingView(viewId, lunGrpId string) error {
- if !c.IsMappingViewContainLunGroup(viewId, lunGrpId) {
- log.Infof("Lun group %s has already been removed from mapping view %s", lunGrpId, viewId)
- return nil
- }
- url := "/mappingview/REMOVE_ASSOCIATE"
- data := map[string]interface{}{
- "ASSOCIATEOBJTYPE": ObjectTypeLunGroup,
- "ASSOCIATEOBJID": lunGrpId,
- "TYPE": ObjectTypeMappingView,
- "ID": viewId}
- if err := c.request("PUT", url, data, nil); err != nil {
- log.Errorf("Remove lun group %s from mapping view %s failed", lunGrpId, viewId)
- return err
- }
- log.Infof("Remove lun group %s from mapping view %s success", lunGrpId, viewId)
- return nil
-}
-
-func (c *OceanStorClient) RemoveHostGroupFromMappingView(viewId, hostGrpId string) error {
- if !c.IsMappingViewContainHostGroup(viewId, hostGrpId) {
- log.Infof("Host group %s has already been removed from mapping view %s", hostGrpId, viewId)
- return nil
- }
- url := "/mappingview/REMOVE_ASSOCIATE"
- data := map[string]interface{}{
- "ASSOCIATEOBJTYPE": ObjectTypeHostGroup,
- "ASSOCIATEOBJID": hostGrpId,
- "TYPE": ObjectTypeMappingView,
- "ID": viewId}
- if err := c.request("PUT", url, data, nil); err != nil {
- log.Errorf("Remove host group %s from mapping view %s failed", hostGrpId, viewId)
- return err
- }
- log.Infof("Remove host group %s from mapping view %s success", hostGrpId, viewId)
- return nil
-}
-
-func (c *OceanStorClient) RemoveHostFromHostGroup(hostGrpId, hostId string) error {
-
- url := fmt.Sprintf("/host/associate?TYPE=14&ID=%s&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%s",
- hostGrpId, hostId)
- if err := c.request("DELETE", url, nil, nil); err != nil {
- log.Errorf("Remove host %s from host group %s failed", hostId, hostGrpId)
- return err
- }
- log.Infof("Remove host %s from host group %s success", hostId, hostGrpId)
- return nil
-}
-
-func (c *OceanStorClient) RemoveIscsiFromHost(initiator string) error {
-
- url := "/iscsi_initiator/remove_iscsi_from_host"
- data := map[string]interface{}{"TYPE": ObjectTypeIscsiInitiator, "ID": initiator}
- if err := c.request("PUT", url, data, nil); err != nil {
- log.Errorf("Remove initiator %s failed", initiator)
- return err
- }
- log.Infof("Remove initiator %s success", initiator)
- return nil
-}
-
-func (c *OceanStorClient) DeleteHostGroup(id string) error {
- return c.request("DELETE", "/hostgroup/"+id, nil, nil)
-}
-
-func (c *OceanStorClient) DeleteLunGroup(id string) error {
- return c.request("DELETE", "/LUNGroup/"+id, nil, nil)
-}
-
-func (c *OceanStorClient) DeleteHost(id string) error {
- return c.request("DELETE", "/host/"+id, nil, nil)
-}
-
-func (c *OceanStorClient) DeleteMappingView(id string) error {
- return c.request("DELETE", "/mappingview/"+id, nil, nil)
-}
-
-func (c *OceanStorClient) GetArrayInfo() (*System, error) {
- sys := &SystemResp{}
- err := c.request("GET", "/system/", nil, sys)
- if err != nil {
- log.Error("Get system info failed,", err)
- return nil, err
- }
- return &sys.Data, nil
-}
-
-func (c *OceanStorClient) ListRemoteDevices() (*[]RemoteDevice, error) {
- dev := &RemoteDevicesResp{}
- err := c.request("GET", "/remote_device", nil, dev)
- if err != nil {
- log.Error("List remote devices failed,", err)
- return nil, err
- }
- return &dev.Data, nil
-}
-
-func (c *OceanStorClient) CreatePair(params map[string]interface{}) (*ReplicationPair, error) {
- pair := &ReplicationPairResp{}
- err := c.request("POST", "/REPLICATIONPAIR", params, pair)
- return &pair.Data, err
-}
-
-func (c *OceanStorClient) GetPair(id string) (*ReplicationPair, error) {
- pair := &ReplicationPairResp{}
- err := c.request("GET", "/REPLICATIONPAIR/"+id, nil, pair)
- if err != nil {
- log.Errorf("Get pair failed, %v", err)
- return nil, err
- }
- return &pair.Data, err
-}
-
-func (c *OceanStorClient) SwitchPair(id string) error {
- data := map[string]interface{}{"ID": id, "TYPE": ObjectTypeReplicationPair}
- err := c.request("PUT", "/REPLICATIONPAIR/switch", data, nil)
- if err != nil {
- log.Errorf("Switch pair failed, %v", err)
- }
- return err
-}
-
-func (c *OceanStorClient) SplitPair(id string) error {
- data := map[string]interface{}{"ID": id, "TYPE": ObjectTypeReplicationPair}
- err := c.request("PUT", "/REPLICATIONPAIR/split", data, nil)
- if err != nil {
- log.Errorf("Split pair failed, %v", err)
- }
- return err
-}
-
-func (c *OceanStorClient) SyncPair(id string) error {
- data := map[string]interface{}{"ID": id, "TYPE": ObjectTypeReplicationPair}
- err := c.request("PUT", "/REPLICATIONPAIR/sync", data, nil)
- if err != nil {
- log.Errorf("Sync pair failed, %v", err)
- }
- return err
-}
-
-func (c *OceanStorClient) SetPairSecondAccess(id string, access string) error {
- data := map[string]interface{}{"ID": id, "SECRESACCESS": access}
- err := c.request("PUT", "/REPLICATIONPAIR/"+id, data, nil)
- if err != nil {
- log.Errorf("Set pair secondary access failed, %v", err)
- }
- return err
-}
-
-func (c *OceanStorClient) DeletePair(id string) error {
- return c.request("DELETE", "/REPLICATIONPAIR/"+id, nil, nil)
-}
-
-func (c *OceanStorClient) CheckPairExist(id string) bool {
- resp := &SimpleResp{}
- err := c.request("GET", "/REPLICATIONPAIR/"+id, nil, resp)
- return err == nil
-}
-
-func (c *OceanStorClient) GetHostOnlineFCInitiators(hostId string) ([]string, error) {
- resp := &FCInitiatorsResp{}
- url := fmt.Sprintf("/fc_initiator?PARENTTYPE=21&PARENTID=%s", hostId)
- if err := c.request("GET", url, nil, resp); err != nil {
- log.Errorf("Get host online fc initiators from host %s failed.", hostId)
- return nil, err
- }
-
- var initiators []string
- if len(resp.Data) > 0 {
- for _, item := range resp.Data {
- if item.ParentId != "" && item.ParentId == hostId && item.RunningStatus == RunningStatusOnline {
- initiators = append(initiators, item.Id)
- }
- }
- }
- log.Infof("Get host online fc initiators from host %s success.", hostId)
- return initiators, nil
-}
-
-func (c *OceanStorClient) GetOnlineFreeWWNs() ([]string, error) {
- resp := &FCInitiatorsResp{}
- url := "/fc_initiator?ISFREE=true&range=[0-65535]"
- if err := c.request("GET", url, nil, resp); err != nil {
- log.Errorf("Get online free wwns failed.")
- return nil, err
- }
-
- var wwns []string
- if len(resp.Data) > 0 {
- for _, item := range resp.Data {
- if item.RunningStatus == RunningStatusOnline {
- wwns = append(wwns, item.Id)
- }
- }
- }
-
- log.Infof("Get online free wwns success.")
- return wwns, nil
-}
-
-func (c *OceanStorClient) GetOnlineFCInitiatorOnArray() ([]string, error) {
- resp := &FCInitiatorsResp{}
- url := "/fc_initiator?range=[0-65535]"
- if err := c.request("GET", url, nil, resp); err != nil {
- log.Errorf("Get online FC initiator on array failed.")
- return nil, err
- }
-
- var fcInitiators []string
- for _, item := range resp.Data {
- if item.RunningStatus == RunningStatusOnline {
- fcInitiators = append(fcInitiators, item.Id)
- }
- }
-
- log.Infof("Get online fc initiators success.")
- return fcInitiators, nil
-}
-
-func (c *OceanStorClient) GetHostFCInitiators(hostId string) ([]string, error) {
- resp := &FCInitiatorsResp{}
- url := fmt.Sprintf("/fc_initiator?PARENTTYPE=21&PARENTID=%s", hostId)
- if err := c.request("GET", url, nil, resp); err != nil {
- log.Errorf("Get host fc initiators failed.")
- return nil, err
- }
-
- var initiators []string
- if len(resp.Data) > 0 {
- for _, item := range resp.Data {
- if item.ParentId != "" && item.ParentId == hostId {
- initiators = append(initiators, item.Id)
- }
- }
- }
- return initiators, nil
-}
-
-func (c *OceanStorClient) GetHostIscsiInitiators(hostId string) ([]string, error) {
- resp := &InitiatorsResp{}
- url := fmt.Sprintf("/iscsi_initiator?PARENTTYPE=21&PARENTID=%s", hostId)
- if err := c.request("GET", url, nil, resp); err != nil {
- log.Errorf("Get host iscsi initiators failed.")
- return nil, err
- }
-
- var initiators []string
- if len(resp.Data) > 0 {
- for _, item := range resp.Data {
- if item.ParentId != "" && item.ParentId == hostId {
- initiators = append(initiators, item.Id)
- }
- }
- }
-
- log.Infof("Get host iscsi initiators success.")
- return initiators, nil
-}
-
-func (c *OceanStorClient) IsHostAssociatedToHostgroup(hostId string) (bool, error) {
- resp := &HostResp{}
- url := fmt.Sprintf("/host/%s", hostId)
- if err := c.request("GET", url, nil, resp); err != nil {
- log.Errorf("Get host iscsi initiators failed.")
- return false, err
- }
-
- if resp.Data.IsAddToHostGroup {
- return true, nil
- }
-
- return false, nil
-}
-
-func (c *OceanStorClient) RemoveHost(hostId string) error {
- return c.request("DELETE", fmt.Sprintf("/host/%s", hostId), nil, nil)
-}
-
-func (c *OceanStorClient) AddFCPortTohost(hostId string, wwn string) error {
- url := fmt.Sprintf("/fc_initiator/%s", wwn)
- data := map[string]interface{}{
- "TYPE": ObjectTypeFcInitiator,
- "ID": wwn,
- "PARENTTYPE": ObjectTypeHost,
- "PARENTID": hostId,
- }
-
- if err := c.request("PUT", url, data, nil); err != nil {
- log.Errorf("Add fc port to host failed.")
- return err
- }
-
- return nil
-}
-
-func (c *OceanStorClient) GetIniTargMap(wwns []string) ([]string, map[string][]string, error) {
- initTargMap := make(map[string][]string)
- var tgtPortWWNs []string
- for _, wwn := range wwns {
- tgtwwpns, err := c.getFCTargetWWPNs(wwn)
- if err != nil {
- return nil, nil, err
- }
- if tgtwwpns == nil {
- continue
- }
-
- initTargMap[wwn] = tgtwwpns
- for _, tgtwwpn := range tgtwwpns {
- if !c.isInStringArray(tgtwwpn, tgtPortWWNs) {
- tgtPortWWNs = append(tgtPortWWNs, tgtwwpn)
- }
- }
- }
-
- if tgtPortWWNs == nil {
- msg := fmt.Sprintf("Get fc target wwpns error, tgt_port_wwns:%s, init_targ_map:%s", tgtPortWWNs, initTargMap)
- log.Errorf(msg)
- return nil, nil, errors.New(msg)
- }
-
- return tgtPortWWNs, initTargMap, nil
-}
-
-func (c *OceanStorClient) isInStringArray(s string, source []string) bool {
- for _, i := range source {
- if s == i {
- return true
- }
- }
- return false
-}
-
-func (c *OceanStorClient) getFCTargetWWPNs(wwn string) ([]string, error) {
- resp := &FCTargWWPNResp{}
- url := fmt.Sprintf("/host_link?INITIATOR_TYPE=223&INITIATOR_PORT_WWN=%s", wwn)
- if err := c.request("GET", url, nil, resp); err != nil {
- log.Errorf("Get fc target wwpn failed.")
- return nil, err
- }
-
- var fcWWPNs []string
- if len(resp.Data) > 0 {
- for _, item := range resp.Data {
- if wwn == item.IniPortWWN {
- fcWWPNs = append(fcWWPNs, item.TargPortWWN)
- }
- }
- }
-
- return fcWWPNs, nil
-}
-
-func (c *OceanStorClient) getObjCountFromLungroupByType(lunGroupId, lunType string) (int, error) {
- // Get obj count associated to the lungroup.
- var cmdType string
- if lunType == ObjectTypeLun {
- cmdType = "lun"
- } else {
- cmdType = "snapshot"
- }
-
- resp := &ObjCountResp{}
- url := fmt.Sprintf("/%s/count?TYPE=%s&ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%s", cmdType, lunType, lunGroupId)
- if err := c.request("GET", url, nil, resp); err != nil {
- log.Errorf("Get Obj count from lungroup by type failed.")
- return 0, err
- }
- if resp.Error.Code == ErrorObjectUnavailable {
- log.Errorf("LUN group %s not exist.", lunGroupId)
- return 0, nil
- }
- count, _ := strconv.Atoi(resp.Data.Count)
- return count, nil
-}
-
-func (c *OceanStorClient) getObjectCountFromLungroup(lunGrpId string) (int, error) {
- lunCount, err := c.getObjCountFromLungroupByType(lunGrpId, ObjectTypeLun)
- if err != nil {
- return 0, nil
- }
- snapshotCount, err := c.getObjCountFromLungroupByType(lunGrpId, ObjectTypeSnapshot)
- if err != nil {
- return 0, nil
- }
- return lunCount + snapshotCount, nil
-}
-
-func (c *OceanStorClient) getHostGroupNumFromHost(hostId string) (int, error) {
- resp := &ObjCountResp{}
- url := fmt.Sprintf("/hostgroup/count?TYPE=14&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%s", hostId)
- if err := c.request("GET", url, nil, resp); err != nil {
- log.Errorf("Get hostgroup num from host failed.")
- return 0, err
- }
-
- count, _ := strconv.Atoi(resp.Data.Count)
- return count, nil
-}
-
-func (c *OceanStorClient) removeFCFromHost(wwn string) error {
- data := map[string]interface{}{
- "TYPE": ObjectTypeFcInitiator,
- "ID": wwn,
- }
-
- err := c.request("PUT", "/fc_initiator/remove_fc_from_host", data, nil)
- return err
-}
-
-func (c *OceanStorClient) getHostgroupAssociatedViews(hostGrpId string) ([]MappingView, error) {
- resp := &MappingViewsResp{}
- url := fmt.Sprintf("/mappingview/associate?TYPE=245&ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=%s", hostGrpId)
- if err := c.request("GET", url, nil, resp); err != nil {
- log.Errorf("Get hostgroup associated views failed.")
- return nil, err
- }
-
- return resp.Data, nil
-}
-
-func (c *OceanStorClient) getHostsInHostgroup(hostGrpId string) ([]Host, error) {
- resp := &HostsResp{}
- url := fmt.Sprintf("/host/associate?ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=%s", hostGrpId)
- if err := c.request("GET", url, nil, resp); err != nil {
- log.Errorf("Get hosts in hostgroup failed.")
- return nil, err
- }
-
- var tempHosts = []Host{}
- if resp.Error.Code == ErrorHostGroupNotExist {
- log.Errorf("Host group %s not exist", hostGrpId)
- return tempHosts, nil
- }
-
- return resp.Data, nil
-}
-
-func (c *OceanStorClient) checkFCInitiatorsExistInHost(hostId string) (bool, error) {
- resp := &FCInitiatorsResp{}
- url := fmt.Sprintf("/fc_initiator?range=[0-65535]&PARENTID=%s", hostId)
- if err := c.request("GET", url, nil, resp); err != nil {
- log.Errorf("Get FC initiators exist in host failed.")
- return false, err
- }
- if len(resp.Data) > 0 {
- return true, nil
- }
-
- return false, nil
-}
-
-func (c *OceanStorClient) checkIscsiInitiatorsExistInHost(hostId string) (bool, error) {
- resp := &FCInitiatorsResp{}
- url := fmt.Sprintf("/iscsi_initiator?range=[0-65535]&PARENTID=%s", hostId)
- if err := c.request("GET", url, nil, resp); err != nil {
- log.Errorf("Get FC initiators exist in host failed.")
- return false, err
- }
- if len(resp.Data) > 0 {
- return true, nil
- }
-
- return false, nil
-}
-
-func (c *OceanStorClient) ListControllers() ([]SimpleStruct, error) {
- resp := &SimpleResp{}
- err := c.request("GET", "/controller", nil, resp)
- return resp.Data, err
-}
-
-func (c *OceanStorClient) GetPerformance(resId string, dataIdList []string) (map[string]string, error) {
- perf := &PerformancesResp{}
- url := fmt.Sprintf("/performace_statistic/cur_statistic_data/"+
- "?CMO_STATISTIC_UUID=%s&CMO_STATISTIC_DATA_ID_LIST=%s", resId, strings.Join(dataIdList, ","))
- if err := c.request("GET", url, nil, perf); err != nil {
- return nil, err
- }
- if len(perf.Data) == 0 {
- return nil, fmt.Errorf("got empty performance data")
- }
-
- dataList := strings.Split(perf.Data[0].DataList, ",")
- idList := strings.Split(perf.Data[0].DataIdList, ",")
- if len(dataList) != len(idList) {
- return nil, fmt.Errorf("wrong response data, data id list length does not equal to data length")
- }
-
- perfMap := map[string]string{}
- for i, id := range idList {
- perfMap[id] = dataList[i]
- }
-
- return perfMap, nil
-}
-
-func (c *OceanStorClient) checkErrorCode(err error, code int) bool {
- if inErr, ok := err.(*ArrayInnerError); ok && inErr.Err.Code == code {
- return true
- }
-
- return false
-}
diff --git a/contrib/drivers/huawei/oceanstor/common.go b/contrib/drivers/huawei/oceanstor/common.go
deleted file mode 100644
index bfcb9c020..000000000
--- a/contrib/drivers/huawei/oceanstor/common.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package oceanstor
-
-import (
- "crypto/md5"
- "encoding/hex"
- "regexp"
- "strconv"
- "strings"
-
- log "github.com/golang/glog"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
-)
-
-type AuthOptions struct {
- Username string `yaml:"username,omitempty"`
- Password string `yaml:"password,omitempty"`
- VstoreName string `yaml:"vstoreName,omitempty"`
- PwdEncrypter string `yaml:"PwdEncrypter,omitempty"`
- EnableEncrypted bool `yaml:"EnableEncrypted,omitempty"`
- Endpoints string `yaml:"endpoints,omitempty"`
- Insecure bool `yaml:"insecure,omitempty"`
-}
-
-type Replication struct {
- RemoteAuthOpt AuthOptions `yaml:"remoteAuthOptions"`
-}
-
-type OceanStorConfig struct {
- AuthOptions `yaml:"authOptions"`
- Replication `yaml:"replication"`
- Pool map[string]PoolProperties `yaml:"pool,flow"`
- TargetIp string `yaml:"targetIp,omitempty"`
-}
-
-const UnitGi = 1024 * 1024 * 1024
-
-func EncodeName(id string) string {
- h := md5.New()
- h.Write([]byte(id))
- encodedName := hex.EncodeToString(h.Sum(nil))
- prefix := strings.Split(id, "-")[0] + "-"
- postfix := encodedName[:MaxNameLength-len(prefix)]
- return prefix + postfix
-}
-
-func EncodeHostName(name string) string {
- isMatch, _ := regexp.MatchString(`[[:alnum:]-_.]+`, name)
- if len(name) > MaxNameLength || !isMatch {
- h := md5.New()
- h.Write([]byte(name))
- encodedName := hex.EncodeToString(h.Sum(nil))
- return encodedName[:MaxNameLength]
- }
- return name
-}
-
-func TruncateDescription(desc string) string {
- if len(desc) > MaxDescriptionLength {
- desc = desc[:MaxDescriptionLength]
- }
- return desc
-}
-
-func Sector2Gb(sec string) int64 {
- size, err := strconv.ParseInt(sec, 10, 64)
- if err != nil {
- log.Error("Convert capacity from string to number failed, error:", err)
- return 0
- }
- return size * 512 / UnitGi
-}
-
-func Gb2Sector(gb int64) int64 {
- return gb * UnitGi / 512
-}
diff --git a/contrib/drivers/huawei/oceanstor/common_test.go b/contrib/drivers/huawei/oceanstor/common_test.go
deleted file mode 100644
index 8d6d83f94..000000000
--- a/contrib/drivers/huawei/oceanstor/common_test.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package oceanstor
-
-import (
- "crypto/md5"
- "encoding/hex"
- "fmt"
- "math/rand"
- "testing"
- "time"
-
- "github.com/opensds/opensds/pkg/utils"
-)
-
-func TestEncodeName(t *testing.T) {
- id := "05935681-8a00-4988-bfd8-90fdb429aecd"
- exspect := "05935681-477ef4d6bb4af7652c1b97"
- result := EncodeName(id)
- if result != exspect {
- t.Error("Test EncodeName failed")
- }
- if len(result) > MaxNameLength {
- t.Error("EncodeName exceed the max name length")
- }
-}
-
-func TestEncodeHostName(t *testing.T) {
- normalName := "1234567890ABCabcZz_.-"
- result := EncodeHostName(normalName)
- if result != normalName {
- t.Error("Test EncodeHostName failed")
- }
- if len(result) > MaxNameLength {
- t.Error("EncodeName exceed the max name length")
- }
-
- longName := "opensds-huawei-oceanstor-opensds-huawei-oceanstor"
- result = EncodeHostName(longName)
- // generate expected result
- h := md5.New()
- h.Write([]byte(longName))
- encodedName := hex.EncodeToString(h.Sum(nil))
- expectedResult := encodedName[:MaxNameLength]
-
- if result != expectedResult {
- t.Error("Test EncodeHostName failed")
- }
- if len(result) > MaxNameLength {
- t.Error("EncodeName exceed the max name length")
- }
-
- invalidName := "iqn.1993-08.org.debian:01:d1f6c8e930e7"
- result = EncodeHostName(invalidName)
- // generate expected result
- h = md5.New()
- h.Write([]byte(invalidName))
- encodedName = hex.EncodeToString(h.Sum(nil))
- expectedResult = encodedName[:MaxNameLength]
-
- if result != expectedResult {
- t.Error("Test EncodeHostName failed")
- }
- if len(result) > MaxNameLength {
- t.Error("EncodeName exceed the max name length")
- }
-}
-
-func randSeq(n int) string {
- var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
- b := make([]rune, n)
- for i := range b {
- b[i] = letters[rand.Intn(len(letters))]
- }
- return string(b)
-}
-
-func TestTruncateDescription(t *testing.T) {
- normalDescription := "This is huawei oceanstor driver testing"
- result := TruncateDescription(normalDescription)
- if result != normalDescription {
- t.Error("Test TruncateDescription failed")
- }
- if len(result) > MaxDescriptionLength {
- t.Error("TruncateDescription exceed the max name length")
- }
-
- longDescription := randSeq(MaxDescriptionLength + 1)
- result = TruncateDescription(longDescription)
- if len(result) > MaxDescriptionLength {
- t.Error("TruncateDescription exceed the max name length")
- }
-
- longDescription = randSeq(MaxDescriptionLength + 255)
- result = TruncateDescription(longDescription)
- if len(result) > MaxDescriptionLength {
- t.Error("TruncateDescription exceed the max name length")
- }
-}
-
-func TestWaitForCondition(t *testing.T) {
- var count = 0
- err := utils.WaitForCondition(func() (bool, error) {
- count++
- time.Sleep(2 * time.Microsecond)
- if count >= 5 {
- return true, nil
- }
- return false, nil
- }, 1*time.Microsecond, 100*time.Second)
- if err != nil {
- t.Errorf("Test WaitForCondition failed, %v", err)
- }
-
- count = 0
- err = utils.WaitForCondition(func() (bool, error) {
- count++
- time.Sleep(1 * time.Millisecond)
- if count >= 5 {
- return true, nil
- }
- return false, nil
- }, 4*time.Millisecond, 100*time.Millisecond)
- if err != nil {
- t.Errorf("Test WaitForCondition failed, %v", err)
- }
-
- err = utils.WaitForCondition(func() (bool, error) {
- return true, fmt.Errorf("test error....")
- }, 4*time.Millisecond, 100*time.Millisecond)
- if err == nil {
- t.Errorf("Test WaitForCondition failed, %v", err)
- }
-
- count = 0
- err = utils.WaitForCondition(func() (bool, error) {
- count++
- time.Sleep(2 * time.Millisecond)
- if count >= 5 {
- return true, nil
- }
- return false, nil
- }, 2*time.Millisecond, 5*time.Millisecond)
- if err == nil {
- t.Errorf("Test WaitForCondition failed, %v", err)
- }
-}
diff --git a/contrib/drivers/huawei/oceanstor/constants.go b/contrib/drivers/huawei/oceanstor/constants.go
deleted file mode 100644
index c07b0a7de..000000000
--- a/contrib/drivers/huawei/oceanstor/constants.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific l
-
-package oceanstor
-
-import "time"
-
-// default value for driver
-const (
- defaultConfPath = "/etc/opensds/driver/huawei_oceanstor_block.yaml"
- defaultAZ = "default"
-)
-
-// metadata keys
-const (
- KLunId = "huaweiLunId"
- KSnapId = "huaweiSnapId"
- KPairId = "huaweiReplicaPairId" // replication pair
-)
-
-// name prefix
-const (
- PrefixMappingView = "OpenSDSMappingView"
- PrefixLunGroup = "OpenSDSLunGroup"
- PrefixHostGroup = "OpenSDSHostGroup"
-)
-
-// object type id
-const (
- ObjectTypeLun = "11"
- ObjectTypeHost = "21"
- ObjectTypeSnapshot = "27"
- ObjectTypeHostGroup = "14"
- ObjectTypeController = "207"
- ObjectTypePool = "216"
- ObjectTypeLunCopy = 219 // not string, should be integer
- ObjectTypeIscsiInitiator = "222"
- ObjectTypeFcInitiator = "223"
- ObjectTypeMappingView = "245"
- ObjectTypeLunGroup = "256"
- ObjectTypeReplicationPair = "263"
-)
-
-// Error Code
-const (
- ErrorConnectToServer = -403
- ErrorUnauthorizedToServer = -401
- ErrorObjectUnavailable = 1077948996
- ErrorHostGroupNotExist = 1077937500
- ErrorObjectNameAlreadyExist = 1077948993
- ErrorHostAlreadyInHostGroup = 1077937501
- ErrorObjectIDNotUnique = 1077948997
- ErrorHostGroupAlreadyInMappingView = 1073804556
- ErrorLunGroupAlreadyInMappingView = 1073804560
- ErrorLunNotExist = 1077936859
-)
-
-// misc
-const (
- ThickLunType = 0
- ThinLunType = 1
- MaxNameLength = 31
- MaxDescriptionLength = 170
- PortNumPerContr = 2
- PwdExpired = 3
- PwdReset = 4
-)
-
-// lun copy
-const (
- LunCopySpeedLow = "1"
- LunCopySpeedMedium = "2"
- LunCopySpeedHigh = "3"
- LunCopySpeedHighest = "4"
-)
-
-var LunCopySpeedTypes = []string{LunCopySpeedLow, LunCopySpeedMedium, LunCopySpeedHigh, LunCopySpeedHighest}
-
-const (
- LunReadyWaitInterval = 2 * time.Second
- LunReadyWaitTimeout = 20 * time.Second
- LunCopyWaitInterval = 2 * time.Second
- LunCopyWaitTimeout = 200 * time.Second
-)
-
-// Object status key id
-const (
- StatusHealth = "1"
- StatusQosActive = "2"
- StatusRunning = "10"
- StatusVolumeReady = "27"
- StatusLunCoping = "39"
- StatusLunCopyStop = "38"
- StatusLunCopyQueue = "37"
- StatusLunCopyNotStart = "36"
- StatusLunCopyReady = "40"
- StatusActive = "43"
- StatusQosInactive = "45"
-)
-
-// Array type
-const (
- ArrayTypeReplication = "1"
- ArrayTypeHeterogeneity = "2"
- ArrayTypeUnknown = "3"
-)
-
-// Health status
-const (
- HealthStatusNormal = "1"
- HealthStatusFault = "2"
- HealthStatusPreFail = "3"
- HealthStatusPartiallyBroken = "4"
- HealthStatusDegraded = "5"
- HealthStatusBadSectorsFound = "6"
- HealthStatusBitErrorsFound = "7"
- HealthStatusConsistent = "8"
- HealthStatusInconsistent = "9"
- HealthStatusBusy = "10"
- HealthStatusNoInput = "11"
- HealthStatusLowBattery = "12"
- HealthStatusSingleLinkFault = "13"
- HealthStatusInvalid = "14"
- HealthStatusWriteProtect = "15"
-)
-
-// Running status
-const (
- RunningStatusNormal = "1"
- RunningStatusLinkUp = "10"
- RunningStatusLinkDown = "11"
- RunningStatusOnline = "27"
- RunningStatusDisabled = "31"
- RunningStatusInitialSync = "21"
- RunningStatusSync = "23"
- RunningStatusSynced = "24"
- RunningStatusSplit = "26"
- RunningStatusInterrupted = "34"
- RunningStatusInvalid = "35"
- RunningStatusConnecting = "101"
-)
-
-const (
- DefaultReplicaWaitInterval = 1 * time.Second
- DefaultReplicaWaitTimeout = 20 * time.Second
-
- ReplicaSyncMode = "1"
- ReplicaAsyncMode = "2"
- ReplicaSpeed = "2"
- ReplicaPeriod = "3600"
- ReplicaSecondRo = "2"
- ReplicaSecondRw = "3"
-
- ReplicaRunningStatusKey = "RUNNINGSTATUS"
- ReplicaHealthStatusKey = "HEALTHSTATUS"
- ReplicaHealthStatusNormal = "1"
-
- ReplicaLocalDataStatusKey = "PRIRESDATASTATUS"
- ReplicaRemoteDataStatusKey = "SECRESDATASTATUS"
- ReplicaDataSyncKey = "ISDATASYNC"
- ReplicaDataStatusSynced = "1"
- ReplicaDataStatusComplete = "2"
- ReplicaDataStatusIncomplete = "3"
-)
-
-// performance key ids
-const (
- PerfUtilizationPercent = "18" // usage ratioPerf
- PerfBandwidth = "21" // mbs
- PerfIOPS = "22" // tps
- PerfServiceTime = "29" // excluding queue time(ms)
- PerfCpuUsage = "68" // %
- PerfCacheHitRatio = "303" // %
- PerfLatency = "370" // ms
-)
diff --git a/contrib/drivers/huawei/oceanstor/metrics.go b/contrib/drivers/huawei/oceanstor/metrics.go
deleted file mode 100644
index 865fc6867..000000000
--- a/contrib/drivers/huawei/oceanstor/metrics.go
+++ /dev/null
@@ -1,311 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oceanstor
-
-import (
- "strconv"
- "time"
-
- log "github.com/golang/glog"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/config"
- uuid "github.com/satori/go.uuid"
- "gopkg.in/yaml.v2"
-)
-
-/*
-Naming Map:
-metrics --> OceanStor
-iops --> Throughput(IOPS)(IO/s)
-bandwidth --> Bandwidth(MB/s) / Block Bandwidth(MB/s)
-latency --> Average I/O Latency(us)
-service_time --> Service Time(Excluding Queue Time)(ms)
-cache_hit_ratio --> % Hit
-cpu_usage --> CPU Usage(%)
-*/
-// Todo: Move this Yaml config to a file
-// Todo: Add resources for "volume", "disk" and "port".
-var data = `
-resources:
- - resource: pool
- metrics:
- - iops
- - bandwidth
- - latency
- - service_time
- - utilization_prcnt
- units:
- - tps
- - mbs
- - microsecond
- - ms
- - prcnt
- - resource: controller
- metrics:
- - iops
- - bandwidth
- - latency
- - service_time
- - cache_hit_ratio
- - cpu_usage
- units:
- - tps
- - mbs
- - microsecond
- - ms
- - prcnt
- - prcnt`
-
-type Config struct {
- Resource string
- Metrics []string
- Units []string
-}
-
-type Configs struct {
- Cfgs []Config `yaml:"resources"`
-}
-type MetricDriver struct {
- conf *OceanStorConfig
- client *OceanStorClient
-}
-
-func getCurrentUnixTimestamp() int64 {
- now := time.Now()
- secs := now.Unix()
- return secs
-}
-func getMetricToUnitMap(resourceType string) map[string]string {
- //construct metrics to value map
- var configs Configs
- //Read supported metric list from yaml config
- //Todo: Move this to read from file
- source := []byte(data)
-
- err := yaml.Unmarshal(source, &configs)
- if err != nil {
- log.Fatalf("unmarshal error: %v", err)
- }
- metricToUnitMap := make(map[string]string)
- for _, resources := range configs.Cfgs {
- if resources.Resource == resourceType {
- for index, metricName := range resources.Metrics {
- metricToUnitMap[metricName] = resources.Units[index]
- }
- }
- }
- return metricToUnitMap
-}
-
-// getMetricList:- is to get the list of supported metrics for given resource type
-// supportedMetrics -> list of supported metrics
-func (d *MetricDriver) GetMetricList(resourceType string) (supportedMetrics []string, err error) {
- var configs Configs
-
- //Read supported metric list from yaml config
- source := []byte(data)
- err = yaml.Unmarshal(source, &configs)
- if err != nil {
- log.Fatalf("unmarshal error: %v", err)
- }
-
- for _, resources := range configs.Cfgs {
- if resources.Resource == resourceType {
- for _, m := range resources.Metrics {
- supportedMetrics = append(supportedMetrics, m)
- }
- break
- }
- }
-
- return supportedMetrics, nil
-}
-
-func (d *MetricDriver) CollectPerformanceMetrics(resId string, metricList []string) (map[string]float64, error) {
- name2id := map[string]string{
- KMetricIOPS: PerfIOPS,
- KMetricBandwidth: PerfBandwidth,
- KMetricLatency: PerfLatency,
- KMetricServiceTime: PerfServiceTime,
- KMetricUtilizationPercent: PerfUtilizationPercent,
- KMetricCacheHitRatio: PerfCacheHitRatio,
- KMetricCpuUsage: PerfCpuUsage,
- }
-
- var idList = make([]string, len(metricList))
- for i, name := range metricList {
- idList[i] = name2id[name]
- }
-
- perfMap, err := d.client.GetPerformance(resId, idList)
- if err != nil {
- log.Errorf("get performance data failed: %s", err)
- return nil, err
- }
-
- var metricMap = make(map[string]float64)
- for _, name := range metricList {
- v, _ := strconv.ParseFloat(perfMap[name2id[name]], 64)
- metricMap[name] = v
- }
- return metricMap, nil
-}
-
-func (d *MetricDriver) CollectControllerMetrics() ([]*model.MetricSpec, error) {
- // get Metrics to unit map
- metricToUnitMap := getMetricToUnitMap(MetricResourceTypeController)
- //validate metric support list
- supportedMetrics, err := d.GetMetricList(MetricResourceTypeController)
- if supportedMetrics == nil {
- log.Infof("no metrics found in the supported metric list")
- }
- controllers, err := d.client.ListControllers()
- if err != nil {
- log.Errorf("get controller failed: %s", err)
- return nil, err
- }
-
- var tempMetricArray []*model.MetricSpec
- for _, controller := range controllers {
- // TODO: the controller id need to be optional
- resId := ObjectTypeController + ":" + controller.Id
- metricMap, err := d.CollectPerformanceMetrics(resId, supportedMetrics)
- if err != nil {
- log.Errorf("get performance data failed: %s", err)
- return nil, err
- }
-
- for _, element := range supportedMetrics {
- metricValue := &model.Metric{
- Timestamp: getCurrentUnixTimestamp(),
- Value: metricMap[element],
- }
- metricValues := make([]*model.Metric, 0)
- metricValues = append(metricValues, metricValue)
- metric := &model.MetricSpec{
- InstanceID: resId,
- InstanceName: resId,
- Job: "HuaweiOceanStor",
- Labels: map[string]string{"controller": resId},
- Component: MetricResourceTypeController,
- Name: element,
- Unit: metricToUnitMap[element],
- AggrType: "",
- MetricValues: metricValues,
- }
- tempMetricArray = append(tempMetricArray, metric)
- }
- }
- return tempMetricArray, nil
-}
-
-func (d *MetricDriver) CollectPoolMetrics() ([]*model.MetricSpec, error) {
- // get Metrics to unit map
- metricToUnitMap := getMetricToUnitMap(MetricResourceTypePool)
- //validate metric support list
- supportedMetrics, err := d.GetMetricList(MetricResourceTypePool)
- if supportedMetrics == nil {
- log.Infof("no metrics found in the supported metric list")
- }
-
- poolAll, err := d.client.ListStoragePools()
- if err != nil {
- log.Errorf("get controller failed: %s", err)
- return nil, err
- }
- // Filter unsupported pools
- var pools []StoragePool
- for _, p := range poolAll {
- if _, ok := d.conf.Pool[p.Name]; !ok {
- continue
- }
- pools = append(pools, p)
- }
-
- var tempMetricArray []*model.MetricSpec
- for _, pool := range pools {
- // TODO: the controller id need to be optional
- resId := ObjectTypePool + ":" + pool.Id
- metricMap, err := d.CollectPerformanceMetrics(resId, supportedMetrics)
- if err != nil {
- log.Errorf("get performance data failed: %s", err)
- return nil, err
- }
- poolId := uuid.NewV5(uuid.NamespaceOID, pool.Name).String()
- for _, element := range supportedMetrics {
- metricValue := &model.Metric{
- Timestamp: getCurrentUnixTimestamp(),
- Value: metricMap[element],
- }
- metricValues := make([]*model.Metric, 0)
- metricValues = append(metricValues, metricValue)
- metric := &model.MetricSpec{
- InstanceID: poolId,
- InstanceName: pool.Name,
- Job: "HuaweiOceanStor",
- Labels: map[string]string{"pool": poolId},
- Component: MetricResourceTypePool,
- Name: element,
- Unit: metricToUnitMap[element],
- AggrType: "",
- MetricValues: metricValues,
- }
- tempMetricArray = append(tempMetricArray, metric)
- }
- }
- return tempMetricArray, nil
-}
-
-// CollectMetrics: Driver entry point to collect metrics. This will be invoked by the dock
-// []*model.MetricSpec -> the array of metrics to be returned
-func (d *MetricDriver) CollectMetrics() ([]*model.MetricSpec, error) {
- var metricFunList = []func() ([]*model.MetricSpec, error){
- d.CollectControllerMetrics, d.CollectPoolMetrics,
- }
-
- var metricAll []*model.MetricSpec
- for _, f := range metricFunList {
- metric, err := f()
- if err != nil {
- log.Errorf("get metric failed: %v", err)
- return nil, err
- }
- metricAll = append(metricAll, metric...)
- }
-
- return metricAll, nil
-}
-
-func (d *MetricDriver) Setup() (err error) {
- // Read huawei oceanstor config file
- path := config.CONF.OsdsDock.Backends.HuaweiOceanStorBlock.ConfigPath
- if "" == path {
- path = defaultConfPath
- }
-
- conf := &OceanStorConfig{}
- Parse(conf, path)
-
- d.conf = conf
- d.client, err = NewClient(&d.conf.AuthOptions)
- if err != nil {
- log.Errorf("Get new client failed, %v", err)
- return err
- }
- return nil
-}
-
-func (*MetricDriver) Teardown() error { return nil }
diff --git a/contrib/drivers/huawei/oceanstor/model.go b/contrib/drivers/huawei/oceanstor/model.go
deleted file mode 100644
index 951d316db..000000000
--- a/contrib/drivers/huawei/oceanstor/model.go
+++ /dev/null
@@ -1,380 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package oceanstor
-
-type Error struct {
- Code int `json:"code"`
- Description string `json:"description"`
-}
-
-type GenericResult struct {
- Data interface{} `json:"data"`
- Error Error `json:"error"`
-}
-
-type Auth struct {
- AccountState int `json:"accountstate"`
- DeviceId string `json:"deviceid"`
- IBaseToken string `json:"iBaseToken"`
- LastLoginIp string `json:"lastloginip"`
- LastLoginTime int `json:"lastlogintime"`
- Level int `json:"level"`
- PwdChanGeTime int `json:"pwdchangetime"`
- UserGroup string `json:"usergroup"`
- UserId string `json:"userid"`
- UserName string `json:"username"`
- UserScope string `json:"userscope"`
-}
-type AuthResp struct {
- Data Auth `json:"data"`
- Error Error `json:"error"`
-}
-
-type StoragePool struct {
- Description string `json:"DESCRIPTION"`
- Id string `json:"ID"`
- Name string `json:"NAME"`
- UserFreeCapacity string `json:"USERFREECAPACITY"`
- UserTotalCapacity string `json:"USERTOTALCAPACITY"`
-}
-type StoragePoolsResp struct {
- Data []StoragePool `json:"data"`
- Error Error `json:"error"`
-}
-
-type Lun struct {
- AllocCapacity string `json:"ALLOCCAPACITY"`
- AllocType string `json:"ALLOCTYPE"`
- Capability string `json:"CAPABILITY"`
- Capacity string `json:"CAPACITY"`
- CapacityAlarmLevel string `json:"CAPACITYALARMLEVEL"`
- Description string `json:"DESCRIPTION"`
- DrsEnable string `json:"DRS_ENABLE"`
- EnableCompression string `json:"ENABLECOMPRESSION"`
- EnableIscsiThinLunThreshold string `json:"ENABLEISCSITHINLUNTHRESHOLD"`
- EnableSmartDedup string `json:"ENABLESMARTDEDUP"`
- ExposedToInitiator string `json:"EXPOSEDTOINITIATOR"`
- ExtendIfSwitch string `json:"EXTENDIFSWITCH"`
- HealthStatus string `json:"HEALTHSTATUS"`
- Id string `json:"ID"`
- IsAdd2LunGroup string `json:"ISADD2LUNGROUP"`
- IsCheckZeroPage string `json:"ISCHECKZEROPAGE"`
- IscsiThinLunThreshold string `json:"ISCSITHINLUNTHRESHOLD"`
- LunMigrationOrigin string `json:"LUNMigrationOrigin"`
- MirrorPolicy string `json:"MIRRORPOLICY"`
- MirrorType string `json:"MIRRORTYPE"`
- Name string `json:"NAME"`
- OwningController string `json:"OWNINGCONTROLLER"`
- ParentId string `json:"PARENTID"`
- ParentName string `json:"PARENTNAME"`
- PrefetChPolicy string `json:"PREFETCHPOLICY"`
- PrefetChValue string `json:"PREFETCHVALUE"`
- RemoteLunId string `json:"REMOTELUNID"`
- RemoteReplicationIds string `json:"REMOTEREPLICATIONIDS"`
- ReplicationCapacity string `json:"REPLICATION_CAPACITY"`
- RunningStatus string `json:"RUNNINGSTATUS"`
- RunningWritePolicy string `json:"RUNNINGWRITEPOLICY"`
- SectorSize string `json:"SECTORSIZE"`
- SnapShotIds string `json:"SNAPSHOTIDS"`
- SubType string `json:"SUBTYPE"`
- ThinCapacityUsage string `json:"THINCAPACITYUSAGE"`
- Type int `json:"TYPE"`
- UsageType string `json:"USAGETYPE"`
- WorkingController string `json:"WORKINGCONTROLLER"`
- WritePolicy string `json:"WRITEPOLICY"`
- Wwn string `json:"WWN"`
- RemoteLunWwn string `json:"remoteLunWwn"`
-}
-
-type LunResp struct {
- Data Lun `json:"data"`
- Error Error `json:"error"`
-}
-
-type LunsResp struct {
- Data []Lun `json:"data"`
- Error Error `json:"error"`
-}
-
-type Snapshot struct {
- CascadedLevel string `json:"CASCADEDLEVEL"`
- CascadedNum string `json:"CASCADEDNUM"`
- ConsumedCapacity string `json:"CONSUMEDCAPACITY"`
- Description string `json:"DESCRIPTION"`
- ExposedToInitiator string `json:"EXPOSEDTOINITIATOR"`
- HealthStatus string `json:"HEALTHSTATUS"`
- Id string `json:"ID"`
- IoClassId string `json:"IOCLASSID"`
- IoPriority string `json:"IOPRIORITY"`
- SourceLunCapacity string `json:"SOURCELUNCAPACITY"`
- Name string `json:"NAME"`
- ParentId string `json:"PARENTID"`
- ParentName string `json:"PARENTNAME"`
- ParentType int `json:"PARENTTYPE"`
- RollBackendTime string `json:"ROLLBACKENDTIME"`
- RollbackRate string `json:"ROLLBACKRATE"`
- RollbackSpeed string `json:"ROLLBACKSPEED"`
- RollbackStartTime string `json:"ROLLBACKSTARTTIME"`
- RollbackTargetObjId string `json:"ROLLBACKTARGETOBJID"`
- RollbackTargetObjName string `json:"ROLLBACKTARGETOBJNAME"`
- RunningStatus string `json:"RUNNINGSTATUS"`
- SourceLunId string `json:"SOURCELUNID"`
- SourceLunName string `json:"SOURCELUNNAME"`
- SubType string `json:"SUBTYPE"`
- TimeStamp string `json:"TIMESTAMP"`
- Type int `json:"TYPE"`
- UserCapacity string `json:"USERCAPACITY"`
- WorkingController string `json:"WORKINGCONTROLLER"`
- Wwn string `json:"WWN"`
- ReplicationCapacity string `json:"replicationCapacity"`
-}
-
-type SnapshotResp struct {
- Data Snapshot `json:"data"`
- Error Error `json:"error"`
-}
-
-type SnapshotsResp struct {
- Data []Snapshot `json:"data"`
- Error Error `json:"error"`
-}
-
-type Initiator struct {
- Id string `json:"ID"`
- Name string `json:"NAME"`
- ParentId string `json:"PARENTID"`
- ParentType string `json:"PARENTTYPE"`
- ParentName string `json:"PARENTNAME"`
-}
-
-type InitiatorResp struct {
- Data Initiator `json:"data"`
- Error Error `json:"error"`
-}
-
-type InitiatorsResp struct {
- Data []Initiator `json:"data"`
- Error Error `json:"error"`
-}
-
-type Host struct {
- Id string `json:"ID"`
- Name string `json:"NAME"`
- OsType string `json:"OPERATIONSYSTEM"`
- Ip string `json:"IP"`
- IsAddToHostGroup bool `json:"ISADD2HOSTGROUP"`
-}
-
-type HostResp struct {
- Data Host `json:"data"`
- Error Error `json:"error"`
-}
-
-type HostsResp struct {
- Data []Host `json:"data"`
- Error Error `json:"error"`
-}
-
-type HostGroup struct {
- Id string `json:"ID"`
- Name string `json:"NAME"`
- Description string `json:"DESCRIPTION"`
- IsAdd2MappingView string `json:"ISADD2MAPPINGVIEW"`
-}
-
-type HostGroupResp struct {
- Data HostGroup `json:"data"`
- Error Error `json:"error"`
-}
-
-type HostGroupsResp struct {
- Data []HostGroup `json:"data"`
- Error Error `json:"error"`
-}
-
-type LunGroup struct {
- Id string `json:"ID"`
- Name string `json:"NAME"`
- Description string `json:"DESCRIPTION"`
- IsAdd2MappingView string `json:"ISADD2MAPPINGVIEW"`
-}
-
-type LunGroupResp struct {
- Data LunGroup `json:"data"`
- Error Error `json:"error"`
-}
-
-type LunGroupsResp struct {
- Data []LunGroup `json:"data"`
- Error Error `json:"error"`
-}
-
-type MappingView struct {
- Id string `json:"ID"`
- Name string `json:"NAME"`
- Description string `json:"DESCRIPTION"`
-}
-
-type MappingViewResp struct {
- Data MappingView `json:"data"`
- Error Error `json:"error"`
-}
-
-type MappingViewsResp struct {
- Data []MappingView `json:"data"`
- Error Error `json:"error"`
-}
-
-type IscsiTgtPort struct {
- EthPortId string `json:"ETHPORTID"`
- Id string `json:"ID"`
- Tpgt string `json:"TPGT"`
- Type int `json:"TYPE"`
-}
-
-type IscsiTgtPortsResp struct {
- Data []IscsiTgtPort `json:"data"`
- Error Error `json:"error"`
-}
-
-type HostAssociateLun struct {
- Id string `json:"ID"`
- AssociateMetadata string `json:"ASSOCIATEMETADATA"`
-}
-
-type HostAssociateLunsResp struct {
- Data []HostAssociateLun `json:"data"`
- Error Error `json:"error"`
-}
-
-type System struct {
- Id string `json:"ID"`
- Name string `json:"NAME"`
- Location string `json:"LOCATION"`
- ProductMode string `json:"PRODUCTMODE"`
- Wwn string `json:"wwn"`
-}
-
-type SystemResp struct {
- Data System `json:"data"`
- Error Error `json:"error"`
-}
-
-type RemoteDevice struct {
- Id string `json:"ID"`
- Name string `json:"NAME"`
- ArrayType string `json:"ARRAYTYPE"`
- HealthStatus string `json:"HEALTHSTATUS"`
- RunningStatus string `json:"RUNNINGSTATUS"`
- Wwn string `json:"WWN"`
-}
-
-type RemoteDevicesResp struct {
- Data []RemoteDevice `json:"data"`
- Error Error `json:"error"`
-}
-
-type ReplicationPair struct {
- Capacity string `json:"CAPACITY"`
- CompressValid string `json:"COMPRESSVALID"`
- EnableCompress string `json:"ENABLECOMPRESS"`
- HealthStatus string `json:"HEALTHSTATUS"`
- Id string `json:"ID"`
- IsDataSync string `json:"ISDATASYNC"`
- IsInCg string `json:"ISINCG"`
- IsPrimary string `json:"ISPRIMARY"`
- IsRollback string `json:"ISROLLBACK"`
- LocalResId string `json:"LOCALRESID"`
- LocalResName string `json:"LOCALRESNAME"`
- LocalResType string `json:"LOCALRESTYPE"`
- PriResDataStatus string `json:"PRIRESDATASTATUS"`
- RecoveryPolicy string `json:"RECOVERYPOLICY"`
- RemoteDeviceId string `json:"REMOTEDEVICEID"`
- RemoteDeviceName string `json:"REMOTEDEVICENAME"`
- RemoteDeviceSn string `json:"REMOTEDEVICESN"`
- RemoteResId string `json:"REMOTERESID"`
- RemoteResName string `json:"REMOTERESNAME"`
- ReplicationMode string `json:"REPLICATIONMODEL"`
- ReplicationProgress string `json:"REPLICATIONPROGRESS"`
- RunningStatus string `json:"RUNNINGSTATUS"`
- SecResAccess string `json:"SECRESACCESS"`
- SecResDataStatus string `json:"SECRESDATASTATUS"`
- Speed string `json:"SPEED"`
- SynchronizeType string `json:"SYNCHRONIZETYPE"`
- SyncLeftTime string `json:"SYNCLEFTTIME"`
- TimeDifference string `json:"TIMEDIFFERENCE"`
- RemTimeoutPeriod string `json:"REMTIMEOUTPERIOD"`
- Type string `json:"TYPE"`
-}
-
-type ReplicationPairResp struct {
- Data ReplicationPair `json:"data"`
- Error Error `json:"error"`
-}
-
-type SimpleStruct struct {
- Id string `json:"ID"`
- Name string `json:"NAME"`
-}
-
-type SimpleResp struct {
- Data []SimpleStruct `json:"data"`
- Error Error `json:"error"`
-}
-
-type FCInitiatorsResp struct {
- Data []FCInitiator `json:"data"`
- Error Error `json:"error"`
-}
-
-type FCInitiator struct {
- Isfree bool `json:"ISFREE"`
- Id string `json:"ID"`
- Type int `json:"TYPE"`
- RunningStatus string `json:"RUNNINGSTATUS"`
- ParentId string `json:"PARENTID"`
- ParentType int `json:"PARENTTYPE"`
-}
-
-type FCTargWWPNResp struct {
- Data []FCTargWWPN `json:"data"`
- Error Error `json:"error"`
-}
-
-type FCTargWWPN struct {
- IniPortWWN string `json:"INITIATOR_PORT_WWN"`
- TargPortWWN string `json:"TARGET_PORT_WWN"`
-}
-
-type ObjCountResp struct {
- Data Count `json:"data"`
- Error Error `json:"error"`
-}
-
-type Count struct {
- Count string `json:"COUNT"`
-}
-
-type Performance struct {
- Uuid string `json:"CMO_STATISTIC_UUID"`
- DataIdList string `json:"CMO_STATISTIC_DATA_ID_LIST"`
- DataList string `json:"CMO_STATISTIC_DATA_LIST"`
- TimeStamp string `json:"CMO_STATISTIC_TIMESTAMP"`
-}
-
-type PerformancesResp struct {
- Data []Performance `json:"data"`
- Error Error `json:"error"`
-}
diff --git a/contrib/drivers/huawei/oceanstor/oceanstor.go b/contrib/drivers/huawei/oceanstor/oceanstor.go
deleted file mode 100644
index 5530342c5..000000000
--- a/contrib/drivers/huawei/oceanstor/oceanstor.go
+++ /dev/null
@@ -1,842 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package oceanstor
-
-import (
- "errors"
- "fmt"
- "os"
- "strings"
-
- log "github.com/golang/glog"
- . "github.com/opensds/opensds/contrib/drivers/utils"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/opensds/opensds/pkg/utils/config"
- uuid "github.com/satori/go.uuid"
-)
-
-type Driver struct {
- conf *OceanStorConfig
- client *OceanStorClient
-}
-
-func (d *Driver) Setup() (err error) {
- // Read huawei oceanstor config file
- conf := &OceanStorConfig{}
- d.conf = conf
- path := config.CONF.OsdsDock.Backends.HuaweiOceanStorBlock.ConfigPath
-
- if "" == path {
- path = defaultConfPath
- }
- Parse(conf, path)
- d.client, err = NewClient(&d.conf.AuthOptions)
- if err != nil {
- log.Errorf("Get new client failed, %v", err)
- return err
- }
- return nil
-}
-
-func (d *Driver) Unset() error {
- d.client.logout()
- return nil
-}
-
-func (d *Driver) createVolumeFromSnapshot(opt *pb.CreateVolumeOpts) (*model.VolumeSpec, error) {
- metadata := opt.GetMetadata()
- if metadata["hypermetro"] == "true" && metadata["replication_enabled"] == "true" {
- msg := "Hypermetro and Replication can not be used in the same volume_type"
- log.Error(msg)
- return nil, errors.New(msg)
- }
- snapshot, e1 := d.client.GetSnapshotByName(EncodeName(opt.GetSnapshotId()))
- if e1 != nil {
- log.Infof("Get Snapshot failed : %v", e1)
- return nil, e1
- }
- volumeDesc := TruncateDescription(opt.GetDescription())
- poolId, err1 := d.client.GetPoolIdByName(opt.GetPoolName())
- if err1 != nil {
- return nil, err1
- }
-
- provPolicy := d.conf.Pool[opt.GetPoolName()].Extras.DataStorage.ProvisioningPolicy
- if provPolicy == "" {
- provPolicy = "Thick"
- }
- lun, err := d.client.CreateVolume(EncodeName(opt.GetId()), opt.GetSize(),
- volumeDesc, poolId, provPolicy)
- if err != nil {
- log.Error("Create Volume Failed:", err)
- return nil, err
- }
-
- log.Infof("Create Volume from snapshot, source_lun_id : %s , target_lun_id : %s", snapshot.Id, lun.Id)
- err = utils.WaitForCondition(func() (bool, error) {
- getVolumeResult, getVolumeErr := d.client.GetVolume(lun.Id)
- if nil == getVolumeErr {
- if getVolumeResult.HealthStatus == StatusHealth && getVolumeResult.RunningStatus == StatusVolumeReady {
- return true, nil
- }
- log.V(5).Infof("Current lun HealthStatus : %s , RunningStatus : %s",
- getVolumeResult.HealthStatus, getVolumeResult.RunningStatus)
- return false, nil
- }
- return false, getVolumeErr
-
- }, LunReadyWaitInterval, LunReadyWaitTimeout)
-
- if err != nil {
- log.Error(err)
- d.client.DeleteVolume(lun.Id)
- return nil, err
- }
- err = d.copyVolume(opt, snapshot.Id, lun.Id)
- if err != nil {
- d.client.DeleteVolume(lun.Id)
- return nil, err
- }
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: Sector2Gb(lun.Capacity),
- Description: volumeDesc,
- AvailabilityZone: opt.GetAvailabilityZone(),
- Metadata: map[string]string{
- KLunId: lun.Id,
- },
- }, nil
-
-}
-func (d *Driver) copyVolume(opt *pb.CreateVolumeOpts, srcid, tgtid string) error {
- metadata := opt.GetMetadata()
- copyspeed := metadata["copyspeed"]
- luncopyid, err := d.client.CreateLunCopy(EncodeName(opt.GetId()), srcid,
- tgtid, copyspeed)
-
- if err != nil {
- log.Error("Create Lun Copy failed,", err)
- return err
- }
-
- err = d.client.StartLunCopy(luncopyid)
- if err != nil {
- log.Errorf("Start lun: %s copy failed :%v,", luncopyid, err)
- d.client.DeleteLunCopy(luncopyid)
- return err
- }
-
- err = utils.WaitForCondition(func() (bool, error) {
- deleteLunCopyErr := d.client.DeleteLunCopy(luncopyid)
- if nil == deleteLunCopyErr {
- return true, nil
- }
-
- return false, nil
- }, LunCopyWaitInterval, LunCopyWaitTimeout)
-
- if err != nil {
- log.Error(err)
- return err
- }
-
- log.Infof("Copy Volume %s success", tgtid)
- return nil
-}
-
-func (d *Driver) CreateVolume(opt *pb.CreateVolumeOpts) (*model.VolumeSpec, error) {
- if opt.GetSnapshotId() != "" {
- return d.createVolumeFromSnapshot(opt)
- }
- name := EncodeName(opt.GetId())
- desc := TruncateDescription(opt.GetDescription())
- poolId, err := d.client.GetPoolIdByName(opt.GetPoolName())
- if err != nil {
- return nil, err
- }
- provPolicy := d.conf.Pool[opt.GetPoolName()].Extras.DataStorage.ProvisioningPolicy
- if provPolicy == "" {
- provPolicy = "Thick"
- }
- lun, err := d.client.CreateVolume(name, opt.GetSize(), desc, poolId, provPolicy)
- if err != nil {
- log.Error("Create Volume Failed:", err)
- return nil, err
- }
- log.Infof("Create volume %s (%s) success.", opt.GetName(), lun.Id)
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: Sector2Gb(lun.Capacity),
- Description: opt.GetDescription(),
- AvailabilityZone: opt.GetAvailabilityZone(),
- Identifier: &model.Identifier{DurableName: lun.Wwn, DurableNameFormat: "NAA"},
- Metadata: map[string]string{
- KLunId: lun.Id,
- },
- }, nil
-}
-
-func (d *Driver) PullVolume(volID string) (*model.VolumeSpec, error) {
- name := EncodeName(volID)
- lun, err := d.client.GetVolumeByName(name)
- if err != nil {
- return nil, err
- }
-
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: volID,
- },
- Size: Sector2Gb(lun.Capacity),
- Description: lun.Description,
- AvailabilityZone: lun.ParentName,
- }, nil
-}
-
-func (d *Driver) DeleteVolume(opt *pb.DeleteVolumeOpts) error {
- lunId := opt.GetMetadata()[KLunId]
- err := d.client.DeleteVolume(lunId)
- if err != nil {
- log.Errorf("Delete volume failed, volume id =%s , Error:%s", opt.GetId(), err)
- return err
- }
- log.Info("Remove volume success, volume id =", opt.GetId())
- return nil
-}
-
-// ExtendVolume ...
-func (d *Driver) ExtendVolume(opt *pb.ExtendVolumeOpts) (*model.VolumeSpec, error) {
- lunId := opt.GetMetadata()[KLunId]
- err := d.client.ExtendVolume(opt.GetSize(), lunId)
- if err != nil {
- log.Error("Extend Volume Failed:", err)
- return nil, err
- }
-
- log.Infof("Extend volume %s (%s) success.", opt.GetName(), opt.GetId())
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- AvailabilityZone: opt.GetAvailabilityZone(),
- }, nil
-}
-
-func (d *Driver) getTargetInfo() (string, string, error) {
- tgtIp := d.conf.TargetIp
- resp, err := d.client.ListTgtPort()
- if err != nil {
- return "", "", err
- }
- for _, itp := range resp.Data {
- items := strings.Split(itp.Id, ",")
- iqn := strings.Split(items[0], "+")[1]
- items = strings.Split(iqn, ":")
- ip := items[len(items)-1]
- if tgtIp == ip {
- return iqn, ip, nil
- }
- }
- msg := fmt.Sprintf("Not find configuration targetIp: %v in device", tgtIp)
- return "", "", errors.New(msg)
-}
-
-func (d *Driver) InitializeConnection(opt *pb.CreateVolumeAttachmentOpts) (*model.ConnectionInfo, error) {
- if opt.GetAccessProtocol() == ISCSIProtocol {
- return d.InitializeConnectionIscsi(opt)
- }
- if opt.GetAccessProtocol() == FCProtocol {
- return d.InitializeConnectionFC(opt)
- }
- return nil, fmt.Errorf("not supported protocol type: %s", opt.GetAccessProtocol())
-}
-
-func (d *Driver) InitializeConnectionIscsi(opt *pb.CreateVolumeAttachmentOpts) (*model.ConnectionInfo, error) {
-
- lunId := opt.GetMetadata()[KLunId]
- hostInfo := opt.GetHostInfo()
- // Create host if not exist.
- hostId, err := d.client.AddHostWithCheck(hostInfo)
- if err != nil {
- log.Errorf("Add host failed, host name =%s, error: %v", hostInfo.Host, err)
- return nil, err
- }
-
- // Add initiator to the host.
- initiatorName := GetInitiatorName(hostInfo.GetInitiators(), opt.GetAccessProtocol())
- if err = d.client.AddInitiatorToHostWithCheck(hostId, initiatorName); err != nil {
- log.Errorf("Add initiator to host failed, host id=%s, initiator=%s, error: %v", hostId, initiatorName, err)
- return nil, err
- }
-
- // Add host to hostgroup.
- hostGrpId, err := d.client.AddHostToHostGroup(hostId)
- if err != nil {
- log.Errorf("Add host to group failed, host id=%s, error: %v", hostId, err)
- return nil, err
- }
-
- // Mapping lungroup and hostgroup to view.
- if err = d.client.DoMapping(lunId, hostGrpId, hostId); err != nil {
- log.Errorf("Do mapping failed, lun id=%s, hostGrpId=%s, hostId=%s, error: %v",
- lunId, hostGrpId, hostId, err)
- return nil, err
- }
-
- tgtIqn, tgtIp, err := d.getTargetInfo()
- if err != nil {
- log.Error("Get the target info failed,", err)
- return nil, err
- }
- tgtLun, err := d.client.GetHostLunId(hostId, lunId)
- if err != nil {
- log.Error("Get the get host lun id failed,", err)
- return nil, err
- }
- connInfo := &model.ConnectionInfo{
- DriverVolumeType: opt.GetAccessProtocol(),
- ConnectionData: map[string]interface{}{
- "targetDiscovered": true,
- "targetIQN": []string{tgtIqn},
- "targetPortal": []string{tgtIp + ":3260"},
- "discard": false,
- "targetLun": tgtLun,
- },
- }
- return connInfo, nil
-}
-
-func (d *Driver) TerminateConnection(opt *pb.DeleteVolumeAttachmentOpts) error {
- if opt.GetAccessProtocol() == ISCSIProtocol {
- return d.TerminateConnectionIscsi(opt)
- }
- if opt.GetAccessProtocol() == FCProtocol {
- return d.TerminateConnectionFC(opt)
- }
- return fmt.Errorf("not supported protocal type: %s", opt.GetAccessProtocol())
-}
-
-func (d *Driver) TerminateConnectionIscsi(opt *pb.DeleteVolumeAttachmentOpts) error {
- hostId, err := d.client.GetHostIdByName(opt.GetHostInfo().GetHost())
- if err != nil {
- // host id has been delete already, ignore the host not found error
- if IsNotFoundError(err) {
- log.Warningf("host(%s) has been removed already, ignore it. "+
- "Delete volume attachment(%s)success.", hostId, opt.GetId())
- return nil
- }
- return err
- }
- // the name format of there objects blow is: xxxPrefix + hostId
- // the empty xxId means that the specified object has been removed already.
- lunGrpId, err := d.client.FindLunGroup(PrefixLunGroup + hostId)
- if err != nil && !IsNotFoundError(err) {
- return err
- }
- hostGrpId, err := d.client.FindHostGroup(PrefixHostGroup + hostId)
- if err != nil && !IsNotFoundError(err) {
- return err
- }
- viewId, err := d.client.FindMappingView(PrefixMappingView + hostId)
- if err != nil && !IsNotFoundError(err) {
- return err
- }
-
- lunId := opt.GetMetadata()[KLunId]
- if lunGrpId != "" {
- if d.client.IsLunGroupContainLun(lunGrpId, lunId) {
- if err := d.client.RemoveLunFromLunGroup(lunGrpId, lunId); err != nil {
- return err
- }
- }
-
- // if lun group still contains other lun(s), ignore the all the operations blow,
- // and goes back with success status.
- var leftObjectCount = 0
- if leftObjectCount, err = d.client.getObjectCountFromLungroup(lunGrpId); err != nil {
- return err
- }
- if leftObjectCount > 0 {
- log.Infof("Lun group(%s) still contains %d lun(s). "+
- "Delete volume attachment(%s)success.", lunGrpId, leftObjectCount, opt.GetId())
- return nil
- }
- }
-
- if viewId != "" {
- if d.client.IsMappingViewContainLunGroup(viewId, lunGrpId) {
- if err := d.client.RemoveLunGroupFromMappingView(viewId, lunGrpId); err != nil {
- return err
- }
- }
- if d.client.IsMappingViewContainHostGroup(viewId, hostGrpId) {
- if err := d.client.RemoveHostGroupFromMappingView(viewId, hostGrpId); err != nil {
- return err
- }
- }
- if err := d.client.DeleteMappingView(viewId); err != nil {
- return err
- }
- }
-
- if lunGrpId != "" {
- if err := d.client.DeleteLunGroup(lunGrpId); err != nil {
- return err
- }
- }
-
- if hostGrpId != "" {
- if d.client.IsHostGroupContainHost(hostGrpId, hostId) {
- if err := d.client.RemoveHostFromHostGroup(hostGrpId, hostId); err != nil {
- return err
- }
- }
- if err := d.client.DeleteHostGroup(hostGrpId); err != nil {
- return err
- }
- }
-
- initiatorName := GetInitiatorName(opt.GetHostInfo().GetInitiators(), opt.GetAccessProtocol())
- if d.client.IsHostContainInitiator(hostId, initiatorName) {
- if err := d.client.RemoveIscsiFromHost(initiatorName); err != nil {
- return err
- }
- }
-
- fcExist, err := d.client.checkFCInitiatorsExistInHost(hostId)
- if err != nil {
- return err
- }
- iscsiExist, err := d.client.checkIscsiInitiatorsExistInHost(hostId)
- if err != nil {
- return err
- }
- if fcExist || iscsiExist {
- log.Warningf("host (%s) still contains initiator(s), ignore delete it. "+
- "Delete volume attachment(%s)success.", hostId, opt.GetId())
- return nil
- }
-
- if err := d.client.DeleteHost(hostId); err != nil {
- return err
- }
- log.Infof("Delete volume attachment(%s)success.", opt.GetId())
- return nil
-}
-
-func (d *Driver) CreateSnapshot(opt *pb.CreateVolumeSnapshotOpts) (*model.VolumeSnapshotSpec, error) {
- lunId := opt.GetMetadata()[KLunId]
- name := EncodeName(opt.GetId())
- desc := TruncateDescription(opt.GetDescription())
- snap, err := d.client.CreateSnapshot(lunId, name, desc)
- if err != nil {
- return nil, err
- }
- return &model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Description: opt.GetDescription(),
- VolumeId: opt.GetVolumeId(),
- Size: 0,
- Metadata: map[string]string{
- KSnapId: snap.Id,
- },
- }, nil
-}
-
-func (d *Driver) PullSnapshot(id string) (*model.VolumeSnapshotSpec, error) {
- name := EncodeName(id)
- snap, err := d.client.GetSnapshotByName(name)
- if err != nil {
- return nil, err
- }
- return &model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: snap.Id,
- },
- Name: snap.Name,
- Description: snap.Description,
- Size: 0,
- VolumeId: snap.ParentId,
- }, nil
-}
-
-func (d *Driver) DeleteSnapshot(opt *pb.DeleteVolumeSnapshotOpts) error {
- id := opt.GetMetadata()[KSnapId]
- err := d.client.DeleteSnapshot(id)
- if err != nil {
- log.Errorf("Delete volume snapshot failed, volume snapshot id = %s , error: %v", opt.GetId(), err)
- return err
- }
- log.Info("Remove volume snapshot success, volume snapshot id =", opt.GetId())
- return nil
-}
-
-func (d *Driver) ListPools() ([]*model.StoragePoolSpec, error) {
- var pols []*model.StoragePoolSpec
- sp, err := d.client.ListStoragePools()
- if err != nil {
- return nil, err
- }
- for _, p := range sp {
- c := d.conf
- if _, ok := c.Pool[p.Name]; !ok {
- continue
- }
- host, _ := os.Hostname()
- name := fmt.Sprintf("%s:%s:%s", host, d.conf.Endpoints, p.Id)
- pol := &model.StoragePoolSpec{
- BaseModel: &model.BaseModel{
- Id: uuid.NewV5(uuid.NamespaceOID, name).String(),
- },
- Name: p.Name,
- TotalCapacity: Sector2Gb(p.UserTotalCapacity),
- FreeCapacity: Sector2Gb(p.UserFreeCapacity),
- StorageType: c.Pool[p.Name].StorageType,
- Extras: c.Pool[p.Name].Extras,
- AvailabilityZone: c.Pool[p.Name].AvailabilityZone,
- MultiAttach: c.Pool[p.Name].MultiAttach,
- }
- if pol.AvailabilityZone == "" {
- pol.AvailabilityZone = defaultAZ
- }
- pols = append(pols, pol)
- }
- return pols, nil
-}
-
-func (d *Driver) InitializeConnectionFC(opt *pb.CreateVolumeAttachmentOpts) (*model.ConnectionInfo, error) {
- lunId := opt.GetMetadata()[KLunId]
- hostInfo := opt.GetHostInfo()
- // Create host if not exist.
- hostId, err := d.client.AddHostWithCheck(hostInfo)
- if err != nil {
- log.Errorf("Add host failed, host name =%s, error: %v", hostInfo.Host, err)
- return nil, err
- }
-
- // Add host to hostgroup.
- hostGrpId, err := d.client.AddHostToHostGroup(hostId)
- if err != nil {
- log.Errorf("Add host to group failed, host id=%s, error: %v", hostId, err)
- return nil, err
- }
-
- // Not use FC switch
- initiators := GetInitiatorsByProtocol(opt.GetHostInfo().GetInitiators(), opt.GetAccessProtocol())
- tgtPortWWNs, initTargMap, err := d.connectFCUseNoSwitch(opt, initiators, hostId)
- if err != nil {
- return nil, err
- }
-
- // Mapping lungroup and hostgroup to view.
- if err = d.client.DoMapping(lunId, hostGrpId, hostId); err != nil {
- log.Errorf("Do mapping failed, lun id=%s, hostGrpId=%s, hostId=%s, error: %v",
- lunId, hostGrpId, hostId, err)
- return nil, err
- }
-
- tgtLun, err := d.client.GetHostLunId(hostId, lunId)
- if err != nil {
- log.Error("Get the get host lun id failed,", err)
- return nil, err
- }
-
- fcInfo := &model.ConnectionInfo{
- DriverVolumeType: opt.GetAccessProtocol(),
- ConnectionData: map[string]interface{}{
- "targetDiscovered": true,
- "targetWWNs": tgtPortWWNs,
- "volumeId": opt.GetVolumeId(),
- "initiator_target_map": initTargMap,
- "description": "huawei",
- "hostName": opt.GetHostInfo().Host,
- "targetLun": tgtLun,
- },
- }
- return fcInfo, nil
-}
-
-func (d *Driver) connectFCUseNoSwitch(opt *pb.CreateVolumeAttachmentOpts, initiators []string, hostId string) ([]string, map[string][]string, error) {
- wwns := initiators
-
- onlineWWNsInHost, err := d.client.GetHostOnlineFCInitiators(hostId)
- if err != nil {
- return nil, nil, err
- }
- onlineFreeWWNs, err := d.client.GetOnlineFreeWWNs()
- if err != nil {
- return nil, nil, err
- }
- onlineFCInitiators, err := d.client.GetOnlineFCInitiatorOnArray()
- if err != nil {
- return nil, nil, err
- }
-
- var wwnsNew []string
- for _, w := range wwns {
- if d.isInStringArray(w, onlineFCInitiators) {
- wwnsNew = append(wwnsNew, w)
- }
- }
- log.Infof("initialize connection, online initiators on the array:%s", wwnsNew)
-
- if wwnsNew == nil {
- return nil, nil, errors.New("no available host initiator")
- }
-
- for _, wwn := range wwnsNew {
- if !d.isInStringArray(wwn, onlineWWNsInHost) && !d.isInStringArray(wwn, onlineFreeWWNs) {
- wwnsInHost, err := d.client.GetHostFCInitiators(hostId)
- if err != nil {
- return nil, nil, err
- }
- iqnsInHost, err := d.client.GetHostIscsiInitiators(hostId)
- if err != nil {
- return nil, nil, err
- }
- flag, err := d.client.IsHostAssociatedToHostgroup(hostId)
- if err != nil {
- return nil, nil, err
- }
-
- if wwnsInHost == nil && iqnsInHost == nil && flag == false {
- if err = d.client.RemoveHost(hostId); err != nil {
- return nil, nil, err
- }
- }
-
- msg := fmt.Sprintf("host initiator occupied: Can not add FC initiator %s to host %s, please check if this initiator has been added to other host.", wwn, hostId)
- log.Errorf(msg)
- return nil, nil, errors.New(msg)
- }
- }
-
- for _, wwn := range wwnsNew {
- if d.isInStringArray(wwn, onlineFreeWWNs) {
- if err = d.client.AddFCPortTohost(hostId, wwn); err != nil {
- return nil, nil, err
- }
- }
- }
-
- tgtPortWWNs, initTargMap, err := d.client.GetIniTargMap(wwnsNew)
- if err != nil {
- return nil, nil, err
- }
-
- return tgtPortWWNs, initTargMap, nil
-
-}
-
-func (d *Driver) isInStringArray(s string, source []string) bool {
- for _, i := range source {
- if s == i {
- return true
- }
- }
- return false
-}
-
-func (d *Driver) TerminateConnectionFC(opt *pb.DeleteVolumeAttachmentOpts) error {
- // Detach lun
- fcInfo, err := d.detachVolumeFC(opt)
- if err != nil {
- return err
- }
- log.Info(fmt.Sprintf("terminate connection fc, return data is: %s", fcInfo))
- return nil
-}
-
-func (d *Driver) detachVolumeFC(opt *pb.DeleteVolumeAttachmentOpts) (string, error) {
- wwns := GetInitiatorsByProtocol(opt.GetHostInfo().GetInitiators(), opt.GetAccessProtocol())
- lunId := opt.GetMetadata()[KLunId]
-
- log.Infof("terminate connection, wwpns: %s,lun id: %s", wwns, lunId)
-
- hostId, lunGrpId, hostGrpId, viewId, err := d.getMappedInfo(opt.GetHostInfo().GetHost())
- if err != nil {
- return "", err
- }
-
- if lunId != "" && lunGrpId != "" {
- if err := d.client.RemoveLunFromLunGroup(lunGrpId, lunId); err != nil {
- return "", err
- }
- }
-
- var leftObjectCount = -1
- if lunGrpId != "" {
- if leftObjectCount, err = d.client.getObjectCountFromLungroup(lunGrpId); err != nil {
- return "", err
- }
- }
-
- var fcInfo string
- if leftObjectCount > 0 {
- fcInfo = "driver_volume_type: fibre_channel, data: {}"
- } else {
- if fcInfo, err = d.deleteZoneAndRemoveFCInitiators(wwns, hostId, hostGrpId, viewId); err != nil {
- return "", err
- }
-
- if err := d.clearHostRelatedResource(lunGrpId, viewId, hostId, hostGrpId); err != nil {
- return "", err
- }
- }
-
- log.Info(fmt.Sprintf("Return target backend FC info is: %s", fcInfo))
- return fcInfo, nil
-}
-
-func (d *Driver) deleteZoneAndRemoveFCInitiators(wwns []string, hostId, hostGrpId, viewId string) (string, error) {
- tgtPortWWNs, initTargMap, err := d.client.GetIniTargMap(wwns)
- if err != nil {
- return "", err
- }
-
- // Remove the initiators from host if need.
- hostGroupNum, err := d.client.getHostGroupNumFromHost(hostId)
- if err != nil {
- return "", err
- }
- if hostGrpId != "" && hostGroupNum <= 1 || (hostGrpId == "" && hostGroupNum <= 0) {
- fcInitiators, err := d.client.GetHostFCInitiators(hostId)
- if err != nil {
- return "", err
- }
- for _, wwn := range wwns {
- if d.isInStringArray(wwn, fcInitiators) {
- if err := d.client.removeFCFromHost(wwn); err != nil {
- return "", err
- }
- }
- }
- }
-
- return fmt.Sprintf("driver_volume_type: fibre_channel, target_wwns: %s, initiator_target_map: %s", tgtPortWWNs, initTargMap), nil
-}
-
-func (d *Driver) getMappedInfo(hostName string) (string, string, string, string, error) {
- hostId, err := d.client.GetHostIdByName(hostName)
- if err != nil {
- return "", "", "", "", err
- }
-
- lunGrpId, err := d.client.FindLunGroup(PrefixLunGroup + hostId)
- if err != nil {
- return "", "", "", "", err
- }
- hostGrpId, err := d.client.FindHostGroup(PrefixHostGroup + hostId)
- if err != nil {
- return "", "", "", "", err
- }
- viewId, err := d.client.FindMappingView(PrefixMappingView + hostId)
- if err != nil {
- return "", "", "", "", err
- }
-
- return hostId, lunGrpId, hostGrpId, viewId, nil
-}
-
-func (d *Driver) clearHostRelatedResource(lunGrpId, viewId, hostId, hostGrpId string) error {
- if lunGrpId != "" {
- if viewId != "" {
- d.client.RemoveLunGroupFromMappingView(viewId, lunGrpId)
- }
- d.client.DeleteLunGroup(lunGrpId)
- }
- if hostId != "" {
- if hostGrpId != "" {
-
- if viewId != "" {
- d.client.RemoveHostGroupFromMappingView(viewId, hostGrpId)
- }
-
- views, err := d.client.getHostgroupAssociatedViews(hostGrpId)
- if err != nil {
- return err
- }
-
- if len(views) <= 0 {
- if err := d.client.RemoveHostFromHostGroup(hostGrpId, hostId); err != nil {
- return err
- }
- hosts, err := d.client.getHostsInHostgroup(hostGrpId)
- if err != nil {
- return err
- }
-
- if len(hosts) <= 0 {
- if err := d.client.DeleteHostGroup(hostGrpId); err != nil {
- return err
- }
- }
- }
- }
-
- flag, err := d.client.checkFCInitiatorsExistInHost(hostId)
- if err != nil {
- return err
- }
- if !flag {
- if err := d.client.RemoveHost(hostId); err != nil {
- return err
- }
- }
- }
-
- if viewId != "" {
- if err := d.client.DeleteMappingView(viewId); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *Driver) InitializeSnapshotConnection(opt *pb.CreateSnapshotAttachmentOpts) (*model.ConnectionInfo, error) {
- return nil, &model.NotImplementError{S: "method InitializeSnapshotConnection has not been implemented yet."}
-}
-
-func (d *Driver) TerminateSnapshotConnection(opt *pb.DeleteSnapshotAttachmentOpts) error {
- return &model.NotImplementError{S: "method TerminateSnapshotConnection has not been implemented yet."}
-}
-
-func (d *Driver) CreateVolumeGroup(opt *pb.CreateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method CreateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) UpdateVolumeGroup(opt *pb.UpdateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method UpdateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) DeleteVolumeGroup(opt *pb.DeleteVolumeGroupOpts) error {
- return &model.NotImplementError{"method DeleteVolumeGroup has not been implemented yet"}
-}
diff --git a/contrib/drivers/huawei/oceanstor/replication.go b/contrib/drivers/huawei/oceanstor/replication.go
deleted file mode 100644
index f3dc335e0..000000000
--- a/contrib/drivers/huawei/oceanstor/replication.go
+++ /dev/null
@@ -1,602 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package oceanstor
-
-import (
- "fmt"
- "strconv"
- "strings"
- "time"
-
- log "github.com/golang/glog"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/opensds/opensds/pkg/utils/config"
-)
-
-// ReplicationDriver
-type ReplicationDriver struct {
- conf *OceanStorConfig
- mgr *ReplicaPairMgr
-}
-
-// Setup
-func (r *ReplicationDriver) Setup() (err error) {
- // Read huawei oceanstor config file
- conf := &OceanStorConfig{}
- r.conf = conf
- path := config.CONF.OsdsDock.Backends.HuaweiOceanStorBlock.ConfigPath
-
- if "" == path {
- path = defaultConfPath
- }
- Parse(conf, path)
- r.mgr, err = NewReplicaPairMgr(conf)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Unset
-func (r *ReplicationDriver) Unset() error { return nil }
-
-// CreateReplication
-func (r *ReplicationDriver) CreateReplication(opt *pb.CreateReplicationOpts) (*model.ReplicationSpec, error) {
- log.Info("oceanstro replication start ...")
- //just be invoked on the primary side.
- if !opt.GetIsPrimary() {
- return &model.ReplicationSpec{}, nil
- }
- pLunId := opt.PrimaryReplicationDriverData[KLunId]
- sLunId := opt.SecondaryReplicationDriverData[KLunId]
- replicationPeriod := strconv.FormatInt(opt.ReplicationPeriod*60, 10)
-
- replicationMode := ReplicaAsyncMode
- if opt.ReplicationMode == model.ReplicationModeSync {
- replicationMode = ReplicaSyncMode
- replicationPeriod = "0"
- }
- resp, err := r.mgr.CreateReplication(pLunId, sLunId, replicationMode, replicationPeriod)
- if err != nil {
- return nil, err
- }
-
- return &model.ReplicationSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Metadata: resp,
- }, nil
-}
-
-func (r *ReplicationDriver) DeleteReplication(opt *pb.DeleteReplicationOpts) error {
- if !opt.GetIsPrimary() {
- return nil
- }
- pairId, ok := opt.GetMetadata()[KPairId]
- var sLunId string
- if opt.SecondaryVolumeId == "" {
- sLunId = opt.SecondaryReplicationDriverData[KLunId]
- }
- if !ok {
- msg := fmt.Sprintf("Can find pair id in metadata")
- log.Errorf(msg)
- return fmt.Errorf(msg)
- }
- return r.mgr.DeleteReplication(pairId, sLunId)
-}
-
-func (r *ReplicationDriver) EnableReplication(opt *pb.EnableReplicationOpts) error {
- if !opt.GetIsPrimary() {
- return nil
- }
- pairId, ok := opt.GetMetadata()[KPairId]
- if !ok {
- msg := fmt.Sprintf("Can find pair id in metadata")
- log.Errorf(msg)
- return fmt.Errorf(msg)
- }
- return r.mgr.localDriver.Enable(pairId, true)
-}
-
-func (r *ReplicationDriver) DisableReplication(opt *pb.DisableReplicationOpts) error {
- if !opt.GetIsPrimary() {
- return nil
- }
- pairId, ok := opt.GetMetadata()[KPairId]
- if !ok {
- msg := fmt.Sprintf("Can find pair id in metadata")
- log.Errorf(msg)
- return fmt.Errorf(msg)
- }
- return r.mgr.localDriver.Split(pairId)
-}
-
-func (r *ReplicationDriver) FailoverReplication(opt *pb.FailoverReplicationOpts) error {
- if !opt.GetIsPrimary() {
- return nil
- }
- pairId, ok := opt.GetMetadata()[KPairId]
- if !ok {
- msg := fmt.Sprintf("Can find pair id in metadata")
- log.Errorf(msg)
- return fmt.Errorf(msg)
- }
- if opt.SecondaryBackendId == model.ReplicationDefaultBackendId {
- return r.mgr.Failover(pairId)
- }
- return r.mgr.Failback(pairId)
-}
-
-func NewReplicaPairMgr(conf *OceanStorConfig) (r *ReplicaPairMgr, err error) {
- r = &ReplicaPairMgr{}
- r.conf = conf
-
- r.localClient, err = NewClient(&conf.AuthOptions)
- if err != nil {
- return nil, err
- }
- r.localOp = NewPairOperation(r.localClient)
- r.localDriver = NewReplicaCommonDriver(conf, r.localOp)
-
- r.remoteClient, err = NewClient(&conf.RemoteAuthOpt)
- if err != nil {
- return nil, err
- }
- r.remoteOp = NewPairOperation(r.remoteClient)
- r.remoteDriver = NewReplicaCommonDriver(conf, r.remoteOp)
-
- return r, nil
-}
-
-type ReplicaPairMgr struct {
- localClient *OceanStorClient
- remoteClient *OceanStorClient
- localOp *PairOperation
- remoteOp *PairOperation
- localDriver *ReplicaCommonDriver
- remoteDriver *ReplicaCommonDriver
- conf *OceanStorConfig
-}
-
-func (r *ReplicaPairMgr) TryGetRemoteWwn() string {
- sys, _ := r.remoteClient.GetArrayInfo()
- return sys.Wwn
-}
-
-func (r *ReplicaPairMgr) TryGetRemoteDevByWwn(wwn string) *RemoteDevice {
- devices, _ := r.localClient.ListRemoteDevices()
- for _, d := range *devices {
- if d.Wwn == wwn {
- return &d
- }
- }
- log.Warningln("Not found remote device")
- return nil
-}
-
-func (r *ReplicaPairMgr) CheckRemoteAvailable() bool {
- wwn := r.TryGetRemoteWwn()
- if wwn == "" {
- return false
- }
- d := r.TryGetRemoteDevByWwn(wwn)
- if d != nil && d.ArrayType == ArrayTypeReplication &&
- d.HealthStatus == HealthStatusNormal && d.RunningStatus == RunningStatusLinkUp {
- return true
- }
- return false
-}
-
-func (r *ReplicaPairMgr) GetRemoteDevInfo() (id, name string) {
- wwn := r.TryGetRemoteWwn()
- if wwn == "" {
- return "", ""
- }
- dev := r.TryGetRemoteDevByWwn(wwn)
- if dev == nil {
- return "", ""
- }
- return dev.Id, dev.Name
-}
-
-func (r *ReplicaPairMgr) WaitVolumeOnline(client *OceanStorClient, lun *Lun, interval, timeout time.Duration) error {
- if lun.RunningStatus == StatusVolumeReady {
- return nil
- }
-
- if interval == -1 {
- interval = DefaultReplicaWaitInterval
- }
- if timeout == -1 {
- timeout = DefaultReplicaWaitTimeout
- }
-
- return utils.WaitForCondition(func() (bool, error) {
- lunInfo, err := client.GetVolume(lun.Id)
- if err != nil {
- log.Errorf("Get lun failed,%v ", err)
- return false, nil
- }
- if lunInfo.RunningStatus == StatusVolumeReady {
- return true, nil
- }
- return false, nil
- }, interval, timeout)
-}
-
-func (r *ReplicaPairMgr) DeletePair(id string) error {
- if !r.localClient.CheckPairExist(id) {
- return nil
- }
- if err := r.localDriver.Split(id); err != nil {
- return err
- }
-
- err := r.localOp.Delete(id)
- return err
-}
-
-func (r *ReplicaPairMgr) CreateReplication(localLunId, rmtLunId, replicationMode string, replicaPeriod string) (map[string]string, error) {
- interval := DefaultReplicaWaitInterval
- timeout := DefaultReplicaWaitTimeout
- var respMap = make(map[string]string)
-
- localLun, err := r.localClient.GetVolume(localLunId)
- if err != nil {
- return nil, err
- }
-
- err = r.WaitVolumeOnline(r.localClient, localLun, interval, timeout)
- if err != nil {
- return nil, err
- }
-
- rmtDevId, rmtDevName := r.GetRemoteDevInfo()
- log.Errorf("rmtDevId:%s, rmtDevName:%s", rmtDevId, rmtDevName)
- if rmtDevId == "" || rmtDevName == "" {
- return nil, fmt.Errorf("get remote deivce info failed")
- }
-
- pair, err := r.localOp.Create(localLun.Id, rmtLunId, rmtDevId, rmtDevName, replicationMode, ReplicaSpeed, replicaPeriod)
- if err != nil {
- return nil, err
- }
- log.Error("start sync ....", pair)
- if err := r.localDriver.Sync(pair.Id, replicationMode == ReplicaSyncMode); err != nil {
- r.DeletePair(pair.Id)
- return nil, err
- }
- respMap[KPairId] = pair.Id
- return respMap, nil
-}
-
-func (r *ReplicaPairMgr) DeleteReplication(pairId, rmtLunId string) error {
- if err := r.DeletePair(pairId); err != nil {
- log.Error("Delete pair failed,", err)
- return err
- }
- return nil
-}
-
-// Failover volumes back to primary backend.
-// The main steps:
-// 1. Switch the role of replication pairs.
-// 2. Copy the second LUN data back to primary LUN.
-// 3. Split replication pairs.
-// 4. Switch the role of replication pairs.
-// 5. Enable replications.
-
-func (r *ReplicaPairMgr) Failback(pairId string) error {
- r.remoteDriver.Enable(pairId, true)
- r.remoteDriver.WaitReplicaReady(pairId)
- r.localDriver.Enable(pairId, false)
- return nil
-}
-
-func (r *ReplicaPairMgr) Failover(pairId string) error {
- return r.remoteDriver.Failover(pairId)
-}
-
-func NewReplicaCommonDriver(conf *OceanStorConfig, op *PairOperation) *ReplicaCommonDriver {
- return &ReplicaCommonDriver{conf: conf, op: op}
-}
-
-type ReplicaCommonDriver struct {
- conf *OceanStorConfig
- op *PairOperation
-}
-
-func (r *ReplicaCommonDriver) ProtectSecond(replicaId string) error {
- replica, err := r.op.GetReplicationInfo(replicaId)
- if err != nil {
- return err
- }
- if replica.SecResAccess == ReplicaSecondRo {
- return nil
- }
- r.op.ProtectSecond(replicaId)
- r.WaitSecondAccess(replicaId, ReplicaSecondRo)
- return nil
-}
-
-func (r *ReplicaCommonDriver) UnprotectSecond(replicaId string) error {
- replica, err := r.op.GetReplicationInfo(replicaId)
- if err != nil {
- return err
- }
- if replica.SecResAccess == ReplicaSecondRw {
- return nil
- }
- r.op.UnprotectSecond(replicaId)
- r.WaitSecondAccess(replicaId, ReplicaSecondRw)
- return nil
-}
-
-func (r *ReplicaCommonDriver) Sync(replicaId string, waitComplete bool) error {
- r.ProtectSecond(replicaId)
- replicaPair, err := r.op.GetReplicationInfo(replicaId)
- if err != nil {
- return err
- }
- expectStatus := []string{
- RunningStatusNormal,
- RunningStatusSync,
- RunningStatusInitialSync,
- }
- if replicaPair.ReplicationMode == ReplicaSyncMode && r.op.isRunningStatus(expectStatus, replicaPair) {
- return nil
- }
- if err := r.op.Sync(replicaId); err != nil {
- return err
- }
- r.WaitExpectState(replicaId, expectStatus, []string{})
- if waitComplete {
- r.WaitReplicaReady(replicaId)
- }
- return nil
-}
-
-func (r *ReplicaCommonDriver) Split(replicaId string) error {
- runningStatus := []string{
- RunningStatusSplit,
- RunningStatusInvalid,
- RunningStatusInterrupted,
- }
- replicaPair, err := r.op.GetReplicationInfo(replicaId)
- if err != nil {
- return err
- }
- if r.op.isRunningStatus(runningStatus, replicaPair) {
- return nil
- }
- err = r.op.Split(replicaId)
- if err != nil {
- log.Errorf("Split replication failed, %v", err)
- return err
- }
- err = r.WaitExpectState(replicaId, runningStatus, []string{})
- if err != nil {
- log.Errorf("Split replication failed, %v", err)
- return err
- }
- return nil
-}
-
-func (r *ReplicaCommonDriver) Enable(replicaId string, waitSyncComplete bool) error {
- replicaPair, err := r.op.GetReplicationInfo(replicaId)
- if err != nil {
- return err
- }
- if !r.op.isPrimary(replicaPair) {
- r.Switch(replicaId)
- }
- return r.Sync(replicaId, waitSyncComplete)
-}
-
-func (r *ReplicaCommonDriver) Switch(replicaId string) error {
- if err := r.Split(replicaId); err != nil {
- return err
- }
-
- if err := r.UnprotectSecond(replicaId); err != nil {
- return err
- }
-
- if err := r.op.Switch(replicaId); err != nil {
- return err
- }
-
- interval := DefaultReplicaWaitInterval
- timeout := DefaultReplicaWaitTimeout
- return utils.WaitForCondition(func() (bool, error) {
- replicaPair, err := r.op.GetReplicationInfo(replicaId)
- if err != nil {
- return false, nil
- }
- return r.op.isPrimary(replicaPair), nil
-
- }, interval, timeout)
-}
-
-func (r *ReplicaCommonDriver) Failover(replicaId string) error {
- replicaPair, err := r.op.GetReplicationInfo(replicaId)
- if err != nil {
- return err
- }
- if r.op.isPrimary(replicaPair) {
- msg := fmt.Sprintf("We should not do switch over on primary array")
- log.Errorf(msg)
- return fmt.Errorf(msg)
- }
- syncStatus := []string{
- RunningStatusSync,
- RunningStatusInitialSync,
- }
- if r.op.isRunningStatus(syncStatus, replicaPair) {
- if err := r.WaitReplicaReady(replicaId); err != nil {
- return err
- }
- }
-
- if err := r.Split(replicaId); err != nil {
- return err
- }
- err = r.op.UnprotectSecond(replicaId)
- return err
-}
-
-func (r *ReplicaCommonDriver) WaitReplicaReady(replicaId string) error {
- log.Info("Wait synchronize complete.")
- runningNormal := []string{
- RunningStatusNormal,
- RunningStatusSynced,
- }
- runningSync := []string{
- RunningStatusSync,
- RunningStatusInitialSync,
- }
- healthNormal := []string{
- ReplicaHealthStatusNormal,
- }
- interval := DefaultReplicaWaitInterval
- timeout := DefaultReplicaWaitTimeout
- return utils.WaitForCondition(func() (bool, error) {
- replicaPair, err := r.op.GetReplicationInfo(replicaId)
- if err != nil {
- return false, nil
- }
- if r.op.isRunningStatus(runningNormal, replicaPair) && r.op.isHealthStatus(healthNormal, replicaPair) {
- return true, nil
- }
- if !r.op.isRunningStatus(runningSync, replicaPair) {
- msg := fmt.Sprintf("wait synchronize failed, running status:%s", replicaPair.RunningStatus)
- return false, fmt.Errorf(msg)
- }
- return false, nil
- }, interval, timeout)
-}
-
-func (r *ReplicaCommonDriver) WaitSecondAccess(replicaId string, accessMode string) {
- interval := DefaultReplicaWaitInterval
- timeout := DefaultReplicaWaitTimeout
- utils.WaitForCondition(func() (bool, error) {
- replicaPair, err := r.op.GetReplicationInfo(replicaId)
- if err != nil {
- return false, nil
- }
- return replicaPair.SecResAccess == accessMode, nil
- }, interval, timeout)
-}
-
-func (r *ReplicaCommonDriver) WaitExpectState(replicaId string, runningStatus, healthStatus []string) error {
- interval := DefaultReplicaWaitInterval
- timeout := DefaultReplicaWaitTimeout
- return utils.WaitForCondition(func() (bool, error) {
- replicaPair, err := r.op.GetReplicationInfo(replicaId)
- if err != nil {
- return false, nil
- }
- if r.op.isRunningStatus(runningStatus, replicaPair) {
- if len(healthStatus) == 0 || r.op.isHealthStatus(healthStatus, replicaPair) {
- return true, nil
- }
- }
- return false, nil
- }, interval, timeout)
-}
-
-func NewPairOperation(client *OceanStorClient) *PairOperation {
- return &PairOperation{client: client}
-}
-
-type PairOperation struct {
- client *OceanStorClient
-}
-
-func (p *PairOperation) isPrimary(replicaPair *ReplicationPair) bool {
- return strings.ToLower(replicaPair.IsPrimary) == "true"
-}
-
-func (p *PairOperation) isRunningStatus(status []string, replicaPair *ReplicationPair) bool {
- return utils.Contained(replicaPair.RunningStatus, status)
-}
-
-func (p *PairOperation) isHealthStatus(status []string, replicaPair *ReplicationPair) bool {
- return utils.Contained(replicaPair.HealthStatus, status)
-}
-
-func (p *PairOperation) Create(localLunId, rmtLunId, rmtDevId, rmtDevName,
- replicationMode, speed, period string) (*ReplicationPair, error) {
- params := map[string]interface{}{
- "LOCALRESID": localLunId,
- "LOCALRESTYPE": ObjectTypeLun,
- "REMOTEDEVICEID": rmtDevId,
- "REMOTEDEVICENAME": rmtDevName,
- "REMOTERESID": rmtLunId,
- "REPLICATIONMODEL": replicationMode,
- // recovery policy. 1: auto, 2: manual
- "RECOVERYPOLICY": "1",
- "SPEED": speed,
- }
-
- if replicationMode == ReplicaAsyncMode {
- // Synchronize type values:
- // 1, manual
- // 2, timed wait when synchronization begins
- // 3, timed wait when synchronization ends
- params["SYNCHRONIZETYPE"] = "2"
- params["TIMINGVAL"] = period
- }
- log.Error(params)
- pair, err := p.client.CreatePair(params)
- if err != nil {
- log.Errorf("Create pair failed,%v", err)
- return nil, err
- }
- return pair, nil
-}
-
-func (p *PairOperation) Split(id string) error {
- return p.client.SplitPair(id)
-}
-
-func (p *PairOperation) Delete(id string) error {
- return p.client.DeletePair(id)
-}
-
-func (p *PairOperation) ProtectSecond(id string) error {
- return p.client.SetPairSecondAccess(id, ReplicaSecondRo)
-}
-
-func (p *PairOperation) UnprotectSecond(id string) error {
- return p.client.SetPairSecondAccess(id, ReplicaSecondRw)
-
-}
-
-func (p *PairOperation) Sync(id string) error {
- return p.client.SyncPair(id)
-}
-
-func (p *PairOperation) Switch(id string) error {
- return p.client.SwitchPair(id)
-}
-
-func (p *PairOperation) GetReplicationInfo(id string) (*ReplicationPair, error) {
- return p.client.GetPair(id)
-}
diff --git a/contrib/drivers/huawei/oceanstor/replication_test.go b/contrib/drivers/huawei/oceanstor/replication_test.go
deleted file mode 100644
index 783062440..000000000
--- a/contrib/drivers/huawei/oceanstor/replication_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package oceanstor
-
-import (
- "testing"
-)
-
-func TestLoadConf(t *testing.T) {
-}
-
-func TestCreateReplication(t *testing.T) {
-}
-
-func TestEnableReplication(t *testing.T) {
-}
-
-func TestDisableReplication(t *testing.T) {
-
-}
-
-func TestFailoverReplication(t *testing.T) {
-
-}
-
-func TestDeleteReplication(t *testing.T) {
-
-}
diff --git a/contrib/drivers/huawei/oceanstor/testdata/oceanstor.yaml b/contrib/drivers/huawei/oceanstor/testdata/oceanstor.yaml
deleted file mode 100644
index 897109f2a..000000000
--- a/contrib/drivers/huawei/oceanstor/testdata/oceanstor.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-authOptions:
- endpoints: "https://8.46.185.114:8088/deviceManager/rest"
- username: "opensds"
- password: "Opensds@123"
- # Whether to encrypt the password. If enabled, the value of the password must be ciphertext.
- EnableEncrypted: false
- # Encryption and decryption tool. Default value is aes. The decryption tool can only decrypt the corresponding ciphertext.
- PwdEncrypter: "aes"
- insecure: true
-
-replication:
- remoteAuthOptions:
- endpoints: "https://8.46.185.104:8088/deviceManager/rest"
- username: "opensds"
- password: "Opensds@123"
- insecure: true
-
-pool:
- StoragePool001:
- storageType: block
- availabilityZone: oceanstor1
- multiAttach: true
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: true
- deduplication: true
- ioConnectivity:
- accessProtocol: iscsi
- maxIOPS: 1000
- advanced:
- diskType: SSD
- throughput: 1000
-targetIp: 8.46.192.247
diff --git a/contrib/drivers/ibm/spectrumscale/cli.go b/contrib/drivers/ibm/spectrumscale/cli.go
deleted file mode 100644
index a292bd6bb..000000000
--- a/contrib/drivers/ibm/spectrumscale/cli.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package spectrumscale
-
-import (
- "strconv"
- "strings"
- "time"
-
- "github.com/appleboy/easyssh-proxy"
- "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/utils/exec"
-)
-
-type MakeConfig struct {
- User string
- Server string
- Password string
- Port string
- Timeout time.Duration
-}
-
-func Executer() *easyssh.MakeConfig {
- ssh := &easyssh.MakeConfig{
- User: username,
- Server: defaultTgtBindIp,
- Password: password,
- Port: port,
- Timeout: timeoutForssh * time.Second,
- }
- return ssh
-}
-
-type Cli struct {
- // Command executer
- BaseExecuter exec.Executer
- // Command Root executer
- RootExecuter exec.Executer
-}
-
-func login() error {
- stdout, stderr, done, err := Executer().Run("uname", timeoutForssh*time.Second)
- if err != nil {
- glog.Errorf("unable to establish connection, stderr:%v", stderr)
- return err
- }
- glog.Infof("connection established. stdout:%v done:%v", stdout, done)
- return nil
-}
-
-func NewCli() (*Cli, error) {
- return &Cli{
- BaseExecuter: exec.NewBaseExecuter(),
- RootExecuter: exec.NewRootExecuter(),
- }, nil
-}
-
-func (c *Cli) execute(cmd ...string) (string, error) {
- return c.RootExecuter.Run(cmd[0], cmd[1:]...)
-}
-
-// get the spectrumscale cluster status
-func (c *Cli) GetSpectrumScaleStatus() error {
- createCmd := "mmgetstate"
- stdout, stderr, done, err := Executer().Run(createCmd, timeoutForssh*time.Second)
- if err != nil {
- glog.Errorf("failed to execute command. stderr:%v", stderr)
- return err
- }
- // above command was successfull with some output
- glog.Infof("command execution was successful. stdout:%v done:%v", stdout, done)
-
- // now parse the output lines to get the status of spectrumscale cluster
- // the expected state is active
- lines := strings.Split(stdout, "\n")
- if !(strings.Contains(lines[2], "active")) {
- glog.Errorf("cluster state is not active")
- return err
- }
- glog.Infof("cluster state is active")
- return nil
-}
-
-// get spectrumscale mount point
-func (c *Cli) GetSpectrumScaleMountPoint() (string, string, error) {
- createCmd := "mmlsfs all -T"
- stdout, stderr, done, err := Executer().Run(createCmd, timeoutForssh*time.Second)
- if err != nil {
- glog.Errorf("failed to list all mountpoint. stderr:%v", stderr)
- return "", "", err
- }
- glog.Infof("the list of mountpoints: stdout:%v, done:%v", stdout, done)
- // now parse the output lines to get mountPoint
- // the example of mountPoint is /ibm/gpfs/fs1
- var mountPoint string
- lines := strings.Split(stdout, "\n")
- for _, line := range lines {
- if strings.Contains(line, "-T") != true {
- continue
- }
- field := strings.Fields(line)
- mountPoint = field[1]
- }
- glog.Infof("the mountpoint is:%v", mountPoint)
-
- // now get the filesystem
- field := strings.Split(mountPoint, "/")
- length := len(field)
- filesystem := field[length-1]
-
- return mountPoint, filesystem, nil
-}
-
-// create volume
-func (c *Cli) CreateVolume(name string, size, filesystem, mountpoint string) error {
- createCmd := "mmcrfileset" + " " + "fs1" + " " + name + " " + "--inode-space" + " " + "new"
- stdout, stderr, done, err := Executer().Run(createCmd, timeoutForssh*time.Second)
- if err != nil {
- glog.Errorf("failed to create fileset. stderr:%v", stderr)
- return err
- }
- glog.Infof("fileset is successfully created. stdout:%v, done:%v", stdout, done)
-
- // now link the fileset with filesystem
- linkCmd := "mmlinkfileset" + " " + filesystem + " " + name + " " + "-J " + mountpoint + "/" + name
- stdout, stderr, done, err = Executer().Run(linkCmd, timeoutForssh*time.Second)
- if err != nil {
- glog.Errorf("failed to link fileset. stderr:%v", stderr)
- return err
- }
- glog.Infof("fileset is successfully linked. stdout:%v, done:%v", stdout, done)
-
- // now set the quota on fileset. Its nothing but allocating the size for fileset
- // for example: mmsetquota fs1:vol8 --block 1G:2G --files 10K:11K
- quotaCmd := "mmsetquota" + " " + "fs1" + ":" + name + " --block" + " " + size + "G" + ":" + size + "G"
- stdout, stderr, done, err = Executer().Run(quotaCmd, timeoutForssh*time.Second)
- if err != nil {
- glog.Errorf("failed to set the quota on fileset. stderr:%v", stderr)
- return err
- }
- glog.Infof("quota is successfully set on fileset. stdout:%v, done:%v", stdout, done)
- glog.Infof("volume:%v is successfuly created with size:%v", name, size)
- return err
-}
-
-// delete volume
-func (c *Cli) Delete(name string) error {
- unlinkCmd := "mmunlinkfileset" + " " + "fs1" + " " + name
- stdout, stderr, done, err := Executer().Run(unlinkCmd, timeoutForssh*time.Second)
- if err != nil {
- glog.Errorf("failed unlink the fileset. stderr:%v", stderr)
- return err
- }
- glog.Infof("filset unlinking successful.stdout:%v, done:%v", stdout, done)
-
- // once unlinking success, delete the fileset
- delCmd := "mmdelfileset" + " " + "fs1" + " " + name + " " + "-f"
- stdout, stderr, done, err = Executer().Run(delCmd, timeoutForssh*time.Second)
- if err != nil {
- glog.Errorf("failed delete the fileset. stderr:%v", stderr)
- return err
- }
- glog.Infof("fileset is successfully deleted. stdout:%v, done:%v", stdout, done)
- glog.Infof("volume:%v is successfuly deleted", name)
- return nil
-}
-
-// this is function for extending the volume size
-func (c *Cli) ExtendVolume(name string, newSize string) error {
- quotaCmd := "mmsetquota" + " " + "fs1" + ":" + name + " --block" + " " + newSize + "G" + ":" + newSize + "G"
- stdout, stderr, done, err := Executer().Run(quotaCmd, timeoutForssh*time.Second)
- if err != nil {
- glog.Errorf("failed extend the quota size on fileset. stderr:%v", stderr)
- return err
- }
- glog.Infof("quota is extended successfully. stdout:%v, done:%v", stdout, done)
- glog.Infof("volume:%v is extended successfully with newsize:%v", name, newSize)
- return nil
-}
-
-// this is function for creating the snapshot
-func (c *Cli) CreateSnapshot(snapName, volName string) error {
- cmd := "mmcrsnapshot" + " " + "fs1" + " " + snapName + " " + "-j" + " " + volName
- stdout, stderr, done, err := Executer().Run(cmd, timeoutForssh*time.Second)
- if err != nil {
- glog.Errorf("failed to create snapshot. stderr:%v", stderr)
- return err
- }
- glog.Infof("stdout:%v done:%v", stdout, done)
- glog.Infof("snapshot:%v is created successfully for volume:%v", snapName, volName)
- return nil
-}
-
-// this is function for deleting the snapshot
-func (c *Cli) DeleteSnapshot(volName, snapName string) error {
- cmd := "mmdelsnapshot" + " " + "fs1" + " " + volName + ":" + snapName
- stdout, stderr, done, err := Executer().Run(cmd, timeoutForssh*time.Second)
- glog.Infof("stdout:%v stderr:%v done:%v", stdout, stderr, done)
- if err != nil {
- glog.Errorf("failed to delete snapshot. stderr:%v", stderr)
- return err
- }
- glog.Infof("stdout:%v done:%v", stdout, done)
- glog.Infof("snapshot:%v is deleted successfully.", snapName)
- return nil
-}
-
-type Pools struct {
- Name string
- TotalCapacity int64
- FreeCapacity int64
- UUID string
-}
-
-// this function is for discover all the pool from spectrumscale cluster
-func (c *Cli) ListPools(mountPoint, filesystem string) (*[]Pools, error) {
- cmd := "mmlspool" + " " + filesystem
- stdout, stderr, done, err := Executer().Run(cmd, timeoutForssh*time.Second)
- glog.Infof("stdout:%v stderr:%v done:%v", stdout, stderr, done)
- if err != nil {
- glog.Errorf("failed to list all pools. stderr:%v", stderr)
- return nil, err
- }
- glog.Infof("the list of pools are: stdout:%v, done:%v", stdout, done)
-
- // now parse the lines to get all pools
- lines := strings.Split(stdout, "\n")
- var pols []Pools
- for _, line := range lines {
- if len(line) == 0 {
- continue
- }
- fields := strings.Fields(line)
- if fields[0] == "Storage" {
- continue
- }
- if fields[0] == "Name" {
- continue
- }
-
- total, _ := strconv.ParseFloat(fields[6], 64)
- free, _ := strconv.ParseFloat(fields[7], 64)
- pool := Pools{
- Name: fields[0],
- TotalCapacity: int64(total / 1000000),
- FreeCapacity: int64(free / 1000000),
- UUID: fields[1],
- }
- pols = append(pols, pool)
- }
- return &pols, nil
-}
diff --git a/contrib/drivers/ibm/spectrumscale/spectrumscale.go b/contrib/drivers/ibm/spectrumscale/spectrumscale.go
deleted file mode 100644
index d3ce9d7e7..000000000
--- a/contrib/drivers/ibm/spectrumscale/spectrumscale.go
+++ /dev/null
@@ -1,280 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package spectrumscale
-
-import (
- "strconv"
- "strings"
-
- log "github.com/golang/glog"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils/config"
- uuid "github.com/satori/go.uuid"
-)
-
-const (
- defaultTgtConfDir = "/etc/tgt/conf.d"
- defaultTgtBindIp = "127.0.0.1"
- username = "root"
- password = "ibm"
- port = "2022"
- defaultConfPath = "/etc/opensds/driver/spectrumscale.yaml"
- volumePrefix = "volume-"
- snapshotPrefix = "_snapshot-"
- blocksize = 4096
- sizeShiftBit = 30
- opensdsnvmepool = "opensds-nvmegroup"
- nvmeofAccess = "nvmeof"
- iscsiAccess = "iscsi"
- storageType = "block"
- timeoutForssh = 60
-)
-
-const (
- KLvIdFormat = "NAA"
- FileSetPath = "FilesetPath"
- SnapshotName = "SnapshotName"
-)
-
-type IBMConfig struct {
- TgtBindIp string `yaml:"tgtBindIp"`
- UserName string `yaml:"username"`
- Password string `yaml:"password"`
- Port string `yaml:"port"`
- TgtConfDir string `yaml:"tgtConfDir"`
- EnableChapAuth bool `yaml:"enableChapAuth"`
- Pool map[string]PoolProperties `yaml:"pool,flow"`
-}
-
-type Driver struct {
- conf *IBMConfig
- cli *Cli
-}
-
-func (d *Driver) Setup() error {
- // Read ibm config file
- d.conf = &IBMConfig{
- TgtBindIp: defaultTgtBindIp,
- TgtConfDir: defaultTgtConfDir,
- UserName: username,
- Port: port,
- Password: password,
- }
- p := config.CONF.OsdsDock.Backends.IBMSpectrumScale.ConfigPath
- if "" == p {
- p = defaultConfPath
- }
- if _, err := Parse(d.conf, p); err != nil {
- return err
- }
- err := login()
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (*Driver) Unset() error { return nil }
-
-// first get the status of spectrumstate. If it is not active just return
-func (d *Driver) CreateVolume(opt *pb.CreateVolumeOpts) (vol *model.VolumeSpec, err error) {
- err = d.cli.GetSpectrumScaleStatus()
- if err != nil {
- log.Error("the GPFS cluster is not active")
- return &model.VolumeSpec{}, err
- }
-
- // if spectrumscale service is active, get the mountPoint and filesystem
- var mountPoint, filesystem string
- mountPoint, filesystem, err = d.cli.GetSpectrumScaleMountPoint()
- if err != nil {
- log.Error("not able to find spectrumscale mount point")
- return &model.VolumeSpec{}, err
- }
- log.Infof("the cluster filesystem name:%v and mounpoint is:%v", filesystem, mountPoint)
-
- log.Info("IBM driver receive create volume request, vr =", opt)
- var volName = volumePrefix + opt.GetId()
- var volSize = opt.GetSize()
- size := strconv.FormatInt(int64(volSize), 10)
- if err = d.cli.CreateVolume(volName, size, filesystem, mountPoint); err != nil {
- log.Errorf("failed to create spectrumscale volume.")
- return &model.VolumeSpec{}, err
- }
-
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- Identifier: &model.Identifier{DurableName: opt.GetId(),
- DurableNameFormat: KLvIdFormat,
- },
- Metadata: map[string]string{
- FileSetPath: mountPoint + "/" + volName,
- },
- }, nil
-}
-
-// discover the pool from spectrumscale
-func (d *Driver) ListPools() ([]*model.StoragePoolSpec, error) {
- var mountPoint, filesystem string
- mountPoint, filesystem, stderr := d.cli.GetSpectrumScaleMountPoint()
- if stderr != nil {
- log.Error("failed to get mountpoint")
- return nil, stderr
- }
- log.Infof("the cluster filesystem name:%v and mounpoint is:%v", filesystem, mountPoint)
-
- pools, err := d.cli.ListPools(mountPoint, filesystem)
- if err != nil {
- return nil, err
- }
- // retrive the all details from spectrumscale pool
- var pols []*model.StoragePoolSpec
- for _, pool := range *pools {
- pol := &model.StoragePoolSpec{
- BaseModel: &model.BaseModel{
- Id: uuid.NewV5(uuid.NamespaceOID, pool.UUID).String(),
- },
- Name: pool.Name,
- TotalCapacity: pool.TotalCapacity,
- FreeCapacity: pool.FreeCapacity,
- StorageType: storageType,
- Extras: d.conf.Pool[pool.Name].Extras,
- AvailabilityZone: d.conf.Pool[pool.Name].AvailabilityZone,
- MultiAttach: d.conf.Pool[pool.Name].MultiAttach,
- }
- if pol.AvailabilityZone == "" {
- pol.AvailabilityZone = "default"
- }
- pols = append(pols, pol)
- }
- return pols, nil
-}
-
-// this function is for deleting the spectrumscale volume(fileset)
-func (d *Driver) DeleteVolume(opt *pb.DeleteVolumeOpts) error {
- fileSetPath := opt.GetMetadata()[FileSetPath]
- field := strings.Split(fileSetPath, "/")
- name := field[3]
- if err := d.cli.Delete(name); err != nil {
- log.Error("failed to remove logic volume:", err)
- return err
- }
- log.Info("volume is successfully deleted!")
- return nil
-}
-
-// this function is for extending the volume(fileset). It sets the quota for block and files
-func (d *Driver) ExtendVolume(opt *pb.ExtendVolumeOpts) (*model.VolumeSpec, error) {
- fileSetPath := opt.GetMetadata()[FileSetPath]
- field := strings.Split(fileSetPath, "/")
- name := field[3]
- var volsize = opt.GetSize()
- size := strconv.FormatInt(int64(volsize), 10)
- if err := d.cli.ExtendVolume(name, size); err != nil {
- log.Error("failed to extend the volume:", err)
- return nil, err
- }
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- Metadata: opt.GetMetadata(),
- }, nil
-}
-
-// this function is for creating the snapshot of spectrumscale volume(fileset)
-func (d *Driver) CreateSnapshot(opt *pb.CreateVolumeSnapshotOpts) (*model.VolumeSnapshotSpec, error) {
- fileSetPath := opt.GetMetadata()[FileSetPath]
- field := strings.Split(fileSetPath, "/")
- volName := field[3]
- var snapName = opt.GetName()
- if err := d.cli.CreateSnapshot(snapName, volName); err != nil {
- log.Error("failed to create snapshot for volume:", err)
- return nil, err
- }
- return &model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- VolumeId: opt.GetVolumeId(),
- Metadata: map[string]string{
- FileSetPath: fileSetPath,
- SnapshotName: snapName,
- },
- }, nil
-}
-
-// this function is for deleting the snapshot
-func (d *Driver) DeleteSnapshot(opt *pb.DeleteVolumeSnapshotOpts) error {
- fileSetPath := opt.GetMetadata()[FileSetPath]
- field := strings.Split(fileSetPath, "/")
- volName := field[3]
- snapName := opt.GetMetadata()[SnapshotName]
- if err := d.cli.DeleteSnapshot(volName, snapName); err != nil {
- log.Error("failed to delete the snapshot:", err)
- return err
- }
- return nil
-}
-
-func (d *Driver) CreateVolumeGroup(opt *pb.CreateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method CreateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) UpdateVolumeGroup(opt *pb.UpdateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method UpdateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) DeleteVolumeGroup(opt *pb.DeleteVolumeGroupOpts) error {
- return &model.NotImplementError{"method DeleteVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) PullVolume(volIdentifier string) (*model.VolumeSpec, error) {
- return nil, &model.NotImplementError{"method CreateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) PullSnapshot(snapIdentifier string) (*model.VolumeSnapshotSpec, error) {
- return nil, &model.NotImplementError{"method CreateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) InitializeSnapshotConnection(opt *pb.CreateSnapshotAttachmentOpts) (*model.ConnectionInfo, error) {
- return nil, &model.NotImplementError{"method CreateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) TerminateSnapshotConnection(opt *pb.DeleteSnapshotAttachmentOpts) error {
- return nil
-}
-
-func (d *Driver) InitializeConnection(opt *pb.CreateVolumeAttachmentOpts) (*model.ConnectionInfo, error) {
- return nil, &model.NotImplementError{"method CreateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) TerminateConnection(opt *pb.DeleteVolumeAttachmentOpts) error {
- return nil
-}
diff --git a/contrib/drivers/lvm/cli.go b/contrib/drivers/lvm/cli.go
deleted file mode 100644
index 38c908ff7..000000000
--- a/contrib/drivers/lvm/cli.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lvm
-
-import (
- "fmt"
- "path"
- "strconv"
- "strings"
- "time"
-
- "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/opensds/opensds/pkg/utils/exec"
-)
-
-type Cli struct {
- // Command executer
- BaseExecuter exec.Executer
- // Command Root executer
- RootExecuter exec.Executer
-}
-
-func NewCli() (*Cli, error) {
- return &Cli{
- BaseExecuter: exec.NewBaseExecuter(),
- RootExecuter: exec.NewRootExecuter(),
- }, nil
-}
-
-func (c *Cli) execute(cmd ...string) (string, error) {
- return c.RootExecuter.Run(cmd[0], cmd[1:]...)
-}
-
-func sizeStr(size int64) string {
- return fmt.Sprintf("%dg", size)
-}
-
-func (c *Cli) CreateVolume(name string, vg string, size int64) error {
- cmd := []string{
- "env", "LC_ALL=C",
- "lvcreate",
- "-Z", "n",
- "-n", name,
- "-L", sizeStr(size),
- vg,
- }
- _, err := c.execute(cmd...)
- return err
-}
-
-func (c *Cli) Exists(name string) bool {
- cmd := []string{
- "env", "LC_ALL=C",
- "lvs",
- "--noheadings",
- "-o", "name",
- }
- out, err := c.execute(cmd...)
- if err != nil {
- return false
- }
- for _, field := range strings.Fields(out) {
- if field == name {
- return true
- }
- }
- return false
-}
-
-// delete volume or snapshot
-func (c *Cli) Delete(name, vg string) error {
- // LV removal seems to be a race with other writers so we enable retry deactivation
- lvmConfig := "activation { retry_deactivation = 1} "
- cmd := []string{
- "env", "LC_ALL=C",
- "lvremove",
- "--config", lvmConfig,
- "-f",
- path.Join(vg, name),
- }
-
- if out, err := c.execute(cmd...); err != nil {
- glog.Infof("Error reported running lvremove: CMD: %s, RESPONSE: %s",
- strings.Join(cmd, " "), out)
- // run_udevadm_settle
- c.execute("udevadm", "settle")
-
- // The previous failing lvremove -f might leave behind
- // suspended devices; when lvmetad is not available, any
- // further lvm command will block forever.
- // Therefore we need to skip suspended devices on retry.
- lvmConfig += "devices { ignore_suspended_devices = 1}"
- cmd := []string{
- "env", "LC_ALL=C",
- "lvremove",
- "--config", lvmConfig,
- "-f",
- path.Join(vg, name),
- }
- if _, err := c.execute(cmd...); err != nil {
- return err
- }
- glog.Infof("Successfully deleted volume: %s after udev settle.", name)
- }
- return nil
-}
-
-func (c *Cli) LvHasSnapshot(name, vg string) bool {
- cmd := []string{
- "env", "LC_ALL=C",
- "lvdisplay",
- "--noheading",
- "-C", "-o",
- "Attr", path.Join(vg, name),
- }
- out, err := c.execute(cmd...)
- if err != nil {
- glog.Error("Failed to display logic volume:", err)
- return false
- }
- out = strings.TrimSpace(out)
- return out[0] == 'o' || out[0] == 'O'
-}
-
-func (c *Cli) LvIsActivate(name, vg string) bool {
- cmd := []string{
- "env", "LC_ALL=C",
- "lvdisplay",
- "--noheading",
- "-C", "-o",
- "Attr", path.Join(vg, name),
- }
- out, err := c.execute(cmd...)
- if err != nil {
- glog.Error("Failed to display logic volume:", err)
- return false
- }
- out = strings.TrimSpace(out)
- return out[4] == 'a'
-}
-
-func (c *Cli) DeactivateLv(name, vg string) error {
- cmd := []string{
- "env", "LC_ALL=C",
- "lvchange",
- "-a", "n",
- path.Join(vg, name),
- }
- if _, err := c.execute(cmd...); err != nil {
- return err
- }
-
- // Wait until lv is deactivated to return in
- // order to prevent a race condition.
- return utils.WaitForCondition(func() (bool, error) {
- return !c.LvIsActivate(name, vg), nil
- }, 500*time.Microsecond, 20*time.Second)
-}
-
-func (c *Cli) ActivateLv(name, vg string) error {
- cmd := []string{
- "env", "LC_ALL=C",
- "lvchange",
- "-a", "y",
- "--yes",
- path.Join(vg, name),
- }
- if _, err := c.execute(cmd...); err != nil {
- return err
- }
- return nil
-}
-
-func (c *Cli) ExtendVolume(name, vg string, newSize int64) error {
- if c.LvHasSnapshot(name, vg) {
- if err := c.DeactivateLv(name, vg); err != nil {
- return err
- }
- defer c.ActivateLv(name, vg)
- }
-
- cmd := []string{
- "env", "LC_ALL=C",
- "lvextend",
- "-L", sizeStr(newSize),
- path.Join(vg, name),
- }
- if _, err := c.execute(cmd...); err != nil {
- return err
- }
- return nil
-}
-
-func (c *Cli) CreateLvSnapshot(name, sourceLvName, vg string, size int64) error {
- cmd := []string{
- "env", "LC_ALL=C",
- "lvcreate",
- "-n", name,
- "-L", sizeStr(size),
- "-p", "r",
- "-s", path.Join(vg, sourceLvName),
- }
- if _, err := c.execute(cmd...); err != nil {
- return err
- }
- return nil
-}
-
-type VolumeGroup struct {
- Name string
- TotalCapacity int64
- FreeCapacity int64
- UUID string
-}
-
-func (c *Cli) ListVgs() (*[]VolumeGroup, error) {
- cmd := []string{
- "env", "LC_ALL=C",
- "vgs",
- "--noheadings",
- "--nosuffix",
- "--unit=g",
- "-o", "name,size,free,uuid",
- }
- out, err := c.execute(cmd...)
- if err != nil {
- return nil, err
- }
- lines := strings.Split(out, "\n")
- var vgs []VolumeGroup
- for _, line := range lines {
- if len(line) == 0 {
- continue
- }
- fields := strings.Fields(line)
- total, _ := strconv.ParseFloat(fields[1], 64)
- free, _ := strconv.ParseFloat(fields[2], 64)
- vg := VolumeGroup{
- Name: fields[0],
- TotalCapacity: int64(total),
- FreeCapacity: int64(free),
- UUID: fields[3],
- }
- vgs = append(vgs, vg)
- }
- return &vgs, nil
-}
-
-func (c *Cli) CopyVolume(src, dest string, size int64) error {
- var count = (size << sizeShiftBit) / blocksize
- _, err := c.execute("dd",
- "if="+src,
- "of="+dest,
- "count="+fmt.Sprint(count),
- "bs="+fmt.Sprint(blocksize),
- )
- return err
-}
diff --git a/contrib/drivers/lvm/lvm.go b/contrib/drivers/lvm/lvm.go
deleted file mode 100755
index 5374822c1..000000000
--- a/contrib/drivers/lvm/lvm.go
+++ /dev/null
@@ -1,593 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lvm
-
-import (
- "errors"
- "fmt"
- "os"
- "path"
- "runtime"
- "strings"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/contrib/backup"
- "github.com/opensds/opensds/contrib/connector"
- "github.com/opensds/opensds/contrib/drivers/lvm/targets"
- . "github.com/opensds/opensds/contrib/drivers/utils"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/opensds/opensds/pkg/utils/config"
- uuid "github.com/satori/go.uuid"
-)
-
-const (
- defaultTgtConfDir = "/etc/tgt/conf.d"
- defaultTgtBindIp = "127.0.0.1"
- defaultConfPath = "/etc/opensds/driver/lvm.yaml"
- volumePrefix = "volume-"
- snapshotPrefix = "_snapshot-"
- blocksize = 4096
- sizeShiftBit = 30
- opensdsnvmepool = "opensds-nvmegroup"
- nvmeofAccess = "nvmeof"
- iscsiAccess = "iscsi"
-)
-
-const (
- KLvPath = "lvPath"
- KLvsPath = "lvsPath"
- KLvIdFormat = "NAA"
-)
-
-type LVMConfig struct {
- TgtBindIp string `yaml:"tgtBindIp"`
- TgtConfDir string `yaml:"tgtConfDir"`
- EnableChapAuth bool `yaml:"enableChapAuth"`
- Pool map[string]PoolProperties `yaml:"pool,flow"`
-}
-
-type Driver struct {
- conf *LVMConfig
- cli *Cli
-}
-
-func (d *Driver) Setup() error {
- // Read lvm config file
- d.conf = &LVMConfig{TgtBindIp: defaultTgtBindIp, TgtConfDir: defaultTgtConfDir}
- p := config.CONF.OsdsDock.Backends.LVM.ConfigPath
- if "" == p {
- p = defaultConfPath
- }
- if _, err := Parse(d.conf, p); err != nil {
- return err
- }
- cli, err := NewCli()
- if err != nil {
- return err
- }
- d.cli = cli
-
- return nil
-}
-
-func (*Driver) Unset() error { return nil }
-
-func (d *Driver) downloadSnapshot(bucket, backupId, dest string) error {
- mc, err := backup.NewBackup("multi-cloud")
- if err != nil {
- log.Errorf("get backup driver, err: %v", err)
- return err
- }
-
- if err := mc.SetUp(); err != nil {
- return err
- }
- defer mc.CleanUp()
-
- file, err := os.OpenFile(dest, os.O_RDWR, 0666)
- if err != nil {
- log.Errorf("open lvm snapshot file, err: %v", err)
- return err
- }
- defer file.Close()
-
- metadata := map[string]string{
- "bucket": bucket,
- }
- b := &backup.BackupSpec{
- Metadata: metadata,
- }
-
- if err := mc.Restore(b, backupId, file); err != nil {
- log.Errorf("upload snapshot to multi-cloud failed, err: %v", err)
- return err
- }
- return nil
-}
-
-func (d *Driver) CreateVolume(opt *pb.CreateVolumeOpts) (vol *model.VolumeSpec, err error) {
- var name = volumePrefix + opt.GetId()
- var vg = opt.GetPoolName()
- if err = d.cli.CreateVolume(name, vg, opt.GetSize()); err != nil {
- return
- }
-
- // remove created volume if got error
- defer func() {
- // using return value as the error flag
- if vol == nil {
- if err := d.cli.Delete(name, vg); err != nil {
- log.Error("Failed to remove logic volume:", err)
- }
- }
- }()
-
- var lvPath = path.Join("/dev", vg, name)
- // Create volume from snapshot
- if opt.GetSnapshotId() != "" {
- if opt.SnapshotFromCloud {
- // download cloud snapshot to volume
- data := opt.GetMetadata()
- backupId, ok := data["backupId"]
- if !ok {
- return nil, errors.New("can't find backupId in metadata")
- }
- bucket, ok := data["bucket"]
- if !ok {
- return nil, errors.New("can't find bucket name in metadata")
- }
- err := d.downloadSnapshot(bucket, backupId, lvPath)
- if err != nil {
- log.Errorf("Download snapshot failed, %v", err)
- return nil, err
- }
- } else {
- // copy local snapshot to volume
- var lvsPath = path.Join("/dev", vg, snapshotPrefix+opt.GetSnapshotId())
- if err := d.cli.CopyVolume(lvsPath, lvPath, opt.GetSize()); err != nil {
- log.Error("Failed to create logic volume:", err)
- return nil, err
- }
- }
- }
-
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- Identifier: &model.Identifier{DurableName: targets.CreateScsiIDFromVolID(opt.GetId()), DurableNameFormat: KLvIdFormat},
- Metadata: map[string]string{
- KLvPath: lvPath,
- },
- }, nil
-}
-
-func (d *Driver) PullVolume(volIdentifier string) (*model.VolumeSpec, error) {
- // Not used , do nothing
- return nil, nil
-}
-
-func (d *Driver) DeleteVolume(opt *pb.DeleteVolumeOpts) error {
-
- var name = volumePrefix + opt.GetId()
- if !d.cli.Exists(name) {
- log.Warningf("Volume(%s) does not exist, nothing to remove", name)
- return nil
- }
-
- lvPath, ok := opt.GetMetadata()[KLvPath]
- if !ok {
- err := errors.New("can't find 'lvPath' in volume metadata")
- log.Error(err)
- return err
- }
-
- field := strings.Split(lvPath, "/")
- vg := field[2]
- if d.cli.LvHasSnapshot(name, vg) {
- err := fmt.Errorf("unable to delete due to existing snapshot for volume: %s", name)
- log.Error(err)
- return err
- }
-
- if err := d.cli.Delete(name, vg); err != nil {
- log.Error("Failed to remove logic volume:", err)
- return err
- }
-
- return nil
-}
-
-// ExtendVolume ...
-func (d *Driver) ExtendVolume(opt *pb.ExtendVolumeOpts) (*model.VolumeSpec, error) {
- var name = volumePrefix + opt.GetId()
- if err := d.cli.ExtendVolume(name, opt.GetPoolName(), opt.GetSize()); err != nil {
- log.Errorf("extend volume(%s) failed, error: %v", name, err)
- return nil, err
- }
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- Metadata: opt.GetMetadata(),
- }, nil
-}
-
-func (d *Driver) InitializeConnection(opt *pb.CreateVolumeAttachmentOpts) (*model.ConnectionInfo, error) {
- log.V(8).Infof("lvm initialize connection information: %v", opt)
- initiator := GetInitiatorName(opt.GetHostInfo().GetInitiators(), opt.GetAccessProtocol())
- if initiator == "" {
- initiator = "ALL"
- }
-
- hostIP := opt.HostInfo.GetIp()
- if hostIP == "" {
- hostIP = d.conf.TgtBindIp
- }
-
- lvPath, ok := opt.GetMetadata()[KLvPath]
- if !ok {
- err := errors.New("can't find 'lvPath' in volume metadata")
- log.Error(err)
- return nil, err
- }
- var chapAuth []string
- if d.conf.EnableChapAuth {
- chapAuth = []string{utils.RandSeqWithAlnum(20), utils.RandSeqWithAlnum(16)}
- }
-
- // create target according to the pool's access protocol
- accPro := opt.AccessProtocol
- log.Info("accpro:", accPro)
- t := targets.NewTarget(d.conf.TgtBindIp, d.conf.TgtConfDir, accPro)
- expt, err := t.CreateExport(opt.GetVolumeId(), lvPath, hostIP, initiator, chapAuth)
- if err != nil {
- log.Error("Failed to initialize connection of logic volume:", err)
- return nil, err
- }
-
- log.V(8).Infof("lvm ConnectionData: %v", expt)
-
- return &model.ConnectionInfo{
- DriverVolumeType: accPro,
- ConnectionData: expt,
- }, nil
-}
-
-func (d *Driver) TerminateConnection(opt *pb.DeleteVolumeAttachmentOpts) error {
- log.V(8).Infof("TerminateConnection: opt info is %v", opt)
- accPro := opt.AccessProtocol
- t := targets.NewTarget(d.conf.TgtBindIp, d.conf.TgtConfDir, accPro)
- if err := t.RemoveExport(opt.GetVolumeId(), opt.GetHostInfo().GetIp()); err != nil {
- log.Error("failed to terminate connection of logic volume:", err)
- return err
- }
- return nil
-}
-
-func (d *Driver) AttachSnapshot(snapshotId string, lvsPath string) (string, *model.ConnectionInfo, error) {
-
- var err error
- createOpt := &pb.CreateSnapshotAttachmentOpts{
- SnapshotId: snapshotId,
- Metadata: map[string]string{
- KLvsPath: lvsPath,
- },
- HostInfo: &pb.HostInfo{
- Platform: runtime.GOARCH,
- OsType: runtime.GOOS,
- Host: d.conf.TgtBindIp,
- Initiators: []*pb.Initiator{},
- },
- }
-
- info, err := d.InitializeSnapshotConnection(createOpt)
- if err != nil {
- return "", nil, err
- }
-
- // rollback
- defer func() {
- if err != nil {
- deleteOpt := &pb.DeleteSnapshotAttachmentOpts{}
- d.TerminateSnapshotConnection(deleteOpt)
- }
- }()
-
- conn := connector.NewConnector(info.DriverVolumeType)
- mountPoint, err := conn.Attach(info.ConnectionData)
- if err != nil {
- return "", nil, err
- }
- log.V(8).Infof("Attach snapshot success, MountPoint:%s", mountPoint)
- return mountPoint, info, nil
-}
-
-func (d *Driver) DetachSnapshot(snapshotId string, info *model.ConnectionInfo) error {
-
- con := connector.NewConnector(info.DriverVolumeType)
- if con == nil {
- return fmt.Errorf("Can not find connector (%s)!", info.DriverVolumeType)
- }
-
- con.Detach(info.ConnectionData)
- attach := &pb.DeleteSnapshotAttachmentOpts{
- SnapshotId: snapshotId,
- AccessProtocol: info.DriverVolumeType,
- }
- return d.TerminateSnapshotConnection(attach)
-}
-
-func (d *Driver) uploadSnapshot(lvsPath string, bucket string) (string, error) {
- mc, err := backup.NewBackup("multi-cloud")
- if err != nil {
- log.Errorf("get backup driver, err: %v", err)
- return "", err
- }
-
- if err := mc.SetUp(); err != nil {
- return "", err
- }
- defer mc.CleanUp()
-
- file, err := os.Open(lvsPath)
- if err != nil {
- log.Errorf("open lvm snapshot file, err: %v", err)
- return "", err
- }
- defer file.Close()
-
- metadata := map[string]string{
- "bucket": bucket,
- }
- b := &backup.BackupSpec{
- Id: uuid.NewV4().String(),
- Metadata: metadata,
- }
-
- if err := mc.Backup(b, file); err != nil {
- log.Errorf("upload snapshot to multi-cloud failed, err: %v", err)
- return "", err
- }
- return b.Id, nil
-}
-
-func (d *Driver) deleteUploadedSnapshot(backupId string, bucket string) error {
- mc, err := backup.NewBackup("multi-cloud")
- if err != nil {
- log.Errorf("get backup driver failed, err: %v", err)
- return err
- }
-
- if err := mc.SetUp(); err != nil {
- return err
- }
- defer mc.CleanUp()
-
- metadata := map[string]string{
- "bucket": bucket,
- }
- b := &backup.BackupSpec{
- Id: backupId,
- Metadata: metadata,
- }
- if err := mc.Delete(b); err != nil {
- log.Errorf("delete backup snapshot failed, err: %v", err)
- return err
- }
- return nil
-}
-
-func (d *Driver) CreateSnapshot(opt *pb.CreateVolumeSnapshotOpts) (snap *model.VolumeSnapshotSpec, err error) {
- var snapName = snapshotPrefix + opt.GetId()
-
- lvPath, ok := opt.GetMetadata()[KLvPath]
- if !ok {
- err := errors.New("can't find 'lvPath' in snapshot metadata")
- log.Error(err)
- return nil, err
- }
-
- fields := strings.Split(lvPath, "/")
- vg, sourceLvName := fields[2], fields[3]
- if err := d.cli.CreateLvSnapshot(snapName, sourceLvName, vg, opt.GetSize()); err != nil {
- log.Error("Failed to create logic volume snapshot:", err)
- return nil, err
- }
-
- lvsPath := path.Join("/dev", vg, snapName)
- metadata := map[string]string{KLvsPath: lvsPath}
-
- if bucket, ok := opt.Metadata["bucket"]; ok {
- //nvmet right now can not support snap volume serve as nvme target
- if vg == opensdsnvmepool {
- log.Infof("nvmet right now can not support snap volume serve as nvme target")
- log.Infof("still store in nvme pool but initialize connection by iscsi protocol")
- }
- mountPoint, info, err := d.AttachSnapshot(opt.GetId(), lvsPath)
- if err != nil {
- d.cli.Delete(snapName, vg)
- return nil, err
- }
- defer d.DetachSnapshot(opt.GetId(), info)
-
- log.Info("update load snapshot to :", bucket)
- backupId, err := d.uploadSnapshot(mountPoint, bucket)
- if err != nil {
- d.cli.Delete(snapName, vg)
- return nil, err
- }
- metadata["backupId"] = backupId
- metadata["bucket"] = bucket
- }
-
- return &model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- VolumeId: opt.GetVolumeId(),
- Metadata: metadata,
- }, nil
-}
-
-func (d *Driver) PullSnapshot(snapIdentifier string) (*model.VolumeSnapshotSpec, error) {
- // not used, do nothing
- return nil, nil
-}
-
-func (d *Driver) DeleteSnapshot(opt *pb.DeleteVolumeSnapshotOpts) error {
-
- if bucket, ok := opt.Metadata["bucket"]; ok {
- log.Info("remove snapshot in multi-cloud :", bucket)
- if err := d.deleteUploadedSnapshot(opt.Metadata["backupId"], bucket); err != nil {
- return err
- }
- }
-
- lvsPath, ok := opt.GetMetadata()[KLvsPath]
- if !ok {
- err := errors.New("can't find 'lvsPath' in snapshot metadata, ingnore it!")
- log.Error(err)
- return nil
- }
- fields := strings.Split(lvsPath, "/")
- vg, snapName := fields[2], fields[3]
- if !d.cli.Exists(snapName) {
- log.Warningf("Snapshot(%s) does not exist, nothing to remove", snapName)
- return nil
- }
-
- if err := d.cli.Delete(snapName, vg); err != nil {
- log.Error("Failed to remove logic volume:", err)
- return err
- }
-
- return nil
-}
-
-func (d *Driver) ListPools() ([]*model.StoragePoolSpec, error) {
-
- vgs, err := d.cli.ListVgs()
- if err != nil {
- return nil, err
- }
-
- var pols []*model.StoragePoolSpec
- for _, vg := range *vgs {
- if _, ok := d.conf.Pool[vg.Name]; !ok {
- continue
- }
-
- pol := &model.StoragePoolSpec{
- BaseModel: &model.BaseModel{
- Id: uuid.NewV5(uuid.NamespaceOID, vg.UUID).String(),
- },
- Name: vg.Name,
- TotalCapacity: vg.TotalCapacity,
- FreeCapacity: vg.FreeCapacity,
- StorageType: d.conf.Pool[vg.Name].StorageType,
- Extras: d.conf.Pool[vg.Name].Extras,
- AvailabilityZone: d.conf.Pool[vg.Name].AvailabilityZone,
- MultiAttach: d.conf.Pool[vg.Name].MultiAttach,
- }
- if pol.AvailabilityZone == "" {
- pol.AvailabilityZone = "default"
- }
- pols = append(pols, pol)
- }
- return pols, nil
-}
-
-func (d *Driver) InitializeSnapshotConnection(opt *pb.CreateSnapshotAttachmentOpts) (*model.ConnectionInfo, error) {
- initiator := GetInitiatorName(opt.GetHostInfo().GetInitiators(), opt.GetAccessProtocol())
- if initiator == "" {
- initiator = "ALL"
- }
-
- hostIP := opt.HostInfo.GetIp()
- if hostIP == "" {
- hostIP = d.conf.TgtBindIp
- }
-
- lvsPath, ok := opt.GetMetadata()[KLvsPath]
- if !ok {
- err := errors.New("Failed to find logic volume path in volume attachment metadata!")
- log.Error(err)
- return nil, err
- }
- var chapAuth []string
- if d.conf.EnableChapAuth {
- chapAuth = []string{utils.RandSeqWithAlnum(20), utils.RandSeqWithAlnum(16)}
- }
-
- accPro := opt.AccessProtocol
- if accPro == nvmeofAccess {
- log.Infof("nvmet right now can not support snap volume serve as nvme target")
- log.Infof("still create snapshot connection by iscsi")
- accPro = iscsiAccess
- }
- t := targets.NewTarget(d.conf.TgtBindIp, d.conf.TgtConfDir, accPro)
- data, err := t.CreateExport(opt.GetSnapshotId(), lvsPath, hostIP, initiator, chapAuth)
- if err != nil {
- log.Error("Failed to initialize snapshot connection of logic volume:", err)
- return nil, err
- }
-
- return &model.ConnectionInfo{
- DriverVolumeType: accPro,
- ConnectionData: data,
- }, nil
-}
-
-func (d *Driver) TerminateSnapshotConnection(opt *pb.DeleteSnapshotAttachmentOpts) error {
- accPro := opt.AccessProtocol
- if accPro == nvmeofAccess {
- log.Infof("nvmet right now can not support snap volume serve as nvme target")
- log.Infof("still create snapshot connection by iscsi")
- accPro = iscsiAccess
- }
- log.Info("terminate snapshot conn")
- t := targets.NewTarget(d.conf.TgtBindIp, d.conf.TgtConfDir, accPro)
- if err := t.RemoveExport(opt.GetSnapshotId(), opt.GetHostInfo().GetIp()); err != nil {
- log.Error("Failed to terminate snapshot connection of logic volume:", err)
- return err
- }
- return nil
-
-}
-
-func (d *Driver) CreateVolumeGroup(opt *pb.CreateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method CreateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) UpdateVolumeGroup(opt *pb.UpdateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method UpdateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) DeleteVolumeGroup(opt *pb.DeleteVolumeGroupOpts) error {
- return &model.NotImplementError{"method DeleteVolumeGroup has not been implemented yet"}
-}
diff --git a/contrib/drivers/lvm/lvm_metrics.go b/contrib/drivers/lvm/lvm_metrics.go
deleted file mode 100644
index 185757b6f..000000000
--- a/contrib/drivers/lvm/lvm_metrics.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lvm
-
-import (
- "strconv"
- "strings"
- "time"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/model"
- "gopkg.in/yaml.v2"
-)
-
-// Supported metrics
-var data = `
-resources:
- - resource: volume
- metrics:
- - iops
- - read_throughput
- - write_throughput
- - response_time
- - service_time
- - utilization
- units:
- - tps
- - kbs
- - kbs
- - ms
- - ms
- - prcnt
- - resource: disk
- metrics:
- - iops
- - read_throughput
- - write_throughput
- - response_time
- - service_time
- - utilization
- units:
- - tps
- - kbs
- - kbs
- - ms
- - ms
- - prcnt`
-
-type Config struct {
- Resource string
- Metrics []string
- Units []string
-}
-
-type Configs struct {
- Cfgs []Config `resources`
-}
-type MetricDriver struct {
- cli *MetricCli
-}
-
-func getCurrentUnixTimestamp() int64 {
- now := time.Now()
- secs := now.Unix()
- return secs
-}
-func getMetricToUnitMap() map[string]string {
-
- //construct metrics to value map
- var configs Configs
- //Read supported metric list from yaml config
- //Todo: Move this to read from file
- source := []byte(data)
-
- error := yaml.Unmarshal(source, &configs)
- if error != nil {
- log.Fatalf("unmarshal error: %v", error)
- }
- metricToUnitMap := make(map[string]string)
- for _, resources := range configs.Cfgs {
- switch resources.Resource {
- //ToDo: Other Cases needs to be added
- case "volume", "disk":
- for index, metricName := range resources.Metrics {
-
- metricToUnitMap[metricName] = resources.Units[index]
-
- }
- }
- }
- return metricToUnitMap
-}
-
-// getMetricList:- is to get the list of supported metrics for given resource type
-// supportedMetrics -> list of supported metrics
-func (d *MetricDriver) GetMetricList(resourceType string) (supportedMetrics []string, err error) {
- var configs Configs
-
- //Read supported metric list from yaml config
- source := []byte(data)
- error := yaml.Unmarshal(source, &configs)
- if error != nil {
- log.Fatalf("unmarshal error: %v", error)
- }
-
- for _, resources := range configs.Cfgs {
- if resources.Resource == resourceType {
- switch resourceType {
- case "volume", "disk":
- for _, m := range resources.Metrics {
- supportedMetrics = append(supportedMetrics, m)
-
- }
- }
- }
- }
-
- return supportedMetrics, nil
-}
-
-// CollectMetrics: Driver entry point to collect metrics. This will be invoked by the dock
-// []*model.MetricSpec -> the array of metrics to be returned
-func (d *MetricDriver) CollectMetrics() ([]*model.MetricSpec, error) {
-
- // get Metrics to unit map
- metricToUnitMap := getMetricToUnitMap()
- //validate metric support list
- supportedMetrics, err := d.GetMetricList("volume")
- if supportedMetrics == nil {
- log.Infof("no metrics found in the supported metric list")
- }
- // discover lvm volumes
- volumeList, vgList, err := d.cli.DiscoverVolumes()
- if err != nil {
- log.Errorf("discover volume function returned error, err: %v", err)
- }
- // discover lvm physical volumes
- DiskList, err := d.cli.DiscoverDisks()
- if err != nil {
- log.Errorf("discover disk returned error, err: %v", err)
- }
- metricMap, labelMap, err := d.cli.CollectMetrics(supportedMetrics)
- if err != nil {
- log.Errorf("collect metrics returned error, err: %v", err)
- }
- var tempMetricArray []*model.MetricSpec
- // fill volume metrics
- for i, volume := range volumeList {
- convrtedVolID := convert(volume, vgList[i])
- aMetricMap := metricMap[convrtedVolID]
- aLabelMap := labelMap[convrtedVolID]
- for _, element := range supportedMetrics {
- val, _ := strconv.ParseFloat(aMetricMap[element], 64)
- metricValue := &model.Metric{
- Timestamp: getCurrentUnixTimestamp(),
- Value: val,
- }
- metricValues := make([]*model.Metric, 0)
- metricValues = append(metricValues, metricValue)
- metric := &model.MetricSpec{
- InstanceID: volume,
- InstanceName: aMetricMap["InstanceName"],
- Job: "lvm",
- Labels: aLabelMap,
- Component: "volume",
- Name: element,
- Unit: metricToUnitMap[element],
- AggrType: "",
- MetricValues: metricValues,
- }
- tempMetricArray = append(tempMetricArray, metric)
- }
- }
- // fill disk metrics
- for _, disk := range DiskList {
- convrtedVolID := formatDiskName(disk)
- aMetricMap := metricMap[convrtedVolID]
- aLabelMap := labelMap[convrtedVolID]
- for _, element := range supportedMetrics {
- val, _ := strconv.ParseFloat(aMetricMap[element], 64)
- metricValue := &model.Metric{
- Timestamp: getCurrentUnixTimestamp(),
- Value: val,
- }
- metricValues := make([]*model.Metric, 0)
- metricValues = append(metricValues, metricValue)
- metric := &model.MetricSpec{
- InstanceID: disk,
- InstanceName: aMetricMap["InstanceName"],
- Job: "lvm",
- Labels: aLabelMap,
- Component: "disk",
- Name: element,
- Unit: metricToUnitMap[element],
- AggrType: "",
- MetricValues: metricValues,
- }
- tempMetricArray = append(tempMetricArray, metric)
- }
- }
- metricArray := tempMetricArray
- return metricArray, err
-}
-func convert(instanceID string, vg string) string {
- // systat utilities (sar/iostat) returnes volume with -- instead of -, so we need to modify volume name to map lvs output
- instanceID = strings.Replace(instanceID, "-", "--", -1)
- vg = strings.Replace(vg, "-", "--", -1)
- //add opensds--volumes--default-- to the start of volume
- instanceID = vg + "-" + instanceID
- return instanceID
-}
-func formatDiskName(instanceID string) string {
- // systat(sar/iostat) returns only disk name. We need to add /dev/ to match with pvs output
- instanceID = strings.Replace(instanceID, "/dev/", "", -1)
- return instanceID
-}
-func (d *MetricDriver) Setup() error {
-
- cli, err := NewMetricCli()
- if err != nil {
- return err
- }
- d.cli = cli
- return nil
-}
-
-func (*MetricDriver) Teardown() error { return nil }
diff --git a/contrib/drivers/lvm/lvm_metrics_test.go b/contrib/drivers/lvm/lvm_metrics_test.go
deleted file mode 100644
index b4f01e4ca..000000000
--- a/contrib/drivers/lvm/lvm_metrics_test.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lvm
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "testing"
-
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/exec"
-)
-
-var metricMap map[string]float64 = map[string]float64{"iops": 3.16, "read_throughput": 4.17, "write_throughput": 134.74, "response_time": 2.67, "service_time": 4.00, "utilization": 1.26}
-var metricToUnitMap map[string]string = map[string]string{"iops": "tps", "read_throughput": "kbs", "write_throughput": "kbs", "response_time": "ms", "service_time": "ms", "utilization": "prcnt"}
-var respMap map[string]*MetricFakeResp = map[string]*MetricFakeResp{
- "sar": {`05:26:43 IST DEV tps rkB/s wkB/s areq-sz aqu-sz await svctm %util
- 05:26:44 loop0 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
- 05:26:44 loop1 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
- 05:26:44 loop2 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
- 05:26:44 loop3 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
- 05:26:44 loop4 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
- 05:26:44 loop5 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
- 05:26:44 loop6 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
- 05:26:44 loop7 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
- 05:26:44 opensds--volumes--default-volume--b902e771--8e02--4099--b601--a6b3881f8 3.16 4.17 134.74 42.67 0.01 2.67 4.00 1.26
- 05:26:44 opensds--volumes--default-volume-d96cc42b-b285-474e-aa98-c61e66df7461 6.26.00 8.27 268.74 84.67 0.02 4.67 8.00 2.46
- 05:26:44 loop9 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
- 05:26:44 loop10 8.77 12.22 32.56 78.01 0.00 0.01 0.01 12.21`, nil},
- "lvs": {`LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
-volume-b902e771-8e02-4099-b601-a6b3881f8 opensds-volumes-default -wi-a----- 1.00g
-volume-d96cc42b-b285-474e-aa98-c61e66df7461 opensds-volumes-default -wi-a----- 1.00g`, nil},
- "pvs": {`PV VG Fmt Attr PSize PFree
-/dev/loop10 opensds-volumes-default lvm2 a-- <20.00g <18.00g`, nil},
-}
-var expectdVgs []string = []string{"opensds-volumes-default", "opensds-volumes-default"}
-var expctdMetricList []string = []string{"iops", "read_throughput", "write_throughput", "response_time", "service_time", "utilization"}
-var expctedVolList []string = []string{"volume-b902e771-8e02-4099-b601-a6b3881f8", "volume-d96cc42b-b285-474e-aa98-c61e66df7461"}
-var expctedDiskList []string = []string{"/dev/loop10"}
-var expctedLabelMap map[string]map[string]string = map[string]map[string]string{"loop10": map[string]string{"device": "loop10"}, "opensds--volumes--default-volume-d96cc42b-b285-474e-aa98-c61e66df7461": map[string]string{"device": "opensds--volumes--default-volume-d96cc42b-b285-474e-aa98-c61e66df7461"}, "opensds--volumes--default-volume--b902e771--8e02--4099--b601--a6b3881f8": map[string]string{"device": "opensds--volumes--default-volume--b902e771--8e02--4099--b601--a6b3881f8"}}
-var expctedMetricMap map[string]map[string]string = map[string]map[string]string{"loop10": map[string]string{"InstanceName": "loop10", "iops": "8.77", "read_throughput": "12.22", "response_time": "0.01", "service_time": "0.01", "write_throughput": "32.56", "utilization": "12.21"},
- "opensds--volumes--default-volume-d96cc42b-b285-474e-aa98-c61e66df7461": map[string]string{"InstanceName": "opensds--volumes--default-volume-d96cc42b-b285-474e-aa98-c61e66df7461", "iops": "6.26", "read_throughput": "8.27", "response_time": "4.67", "service_time": "8.00", "write_throughput": "268.74", "utilization": "2.46"},
- "opensds--volumes--default-volume--b902e771--8e02--4099--b601--a6b3881f8": map[string]string{"InstanceName": "opensds--volumes--default-volume--b902e771--8e02--4099--b601--a6b3881f8", "iops": "3.16", "read_throughput": "4.17", "response_time": "2.67", "service_time": "4.00", "write_throughput": "134.74", "utilization": "1.26"}}
-
-func TestMetricDriverSetup(t *testing.T) {
- var d = &MetricDriver{}
- if err := d.Setup(); err != nil {
- t.Errorf("setup lvm metric driver failed: %+v\n", err)
- }
-}
-
-type MetricFakeExecuter struct {
- RespMap map[string]*MetricFakeResp
-}
-
-type MetricFakeResp struct {
- out string
- err error
-}
-
-func (f *MetricFakeExecuter) Run(name string, args ...string) (string, error) {
- var cmd = name
- if name == "env" {
- cmd = args[1]
- }
- v, ok := f.RespMap[cmd]
- if !ok {
- return "", fmt.Errorf("can't find specified op: %s", args[1])
- }
- return v.out, v.err
-}
-
-func NewMetricFakeExecuter(respMap map[string]*MetricFakeResp) exec.Executer {
- return &MetricFakeExecuter{RespMap: respMap}
-}
-
-func TestGetMetricList(t *testing.T) {
- var md = &MetricDriver{}
- md.Setup()
- returnedMetricList, err := md.GetMetricList("volume")
- if err != nil {
- t.Error("failed to validate metric list:", err)
- }
- if !reflect.DeepEqual(expctdMetricList, returnedMetricList) {
- t.Errorf("expected %+v, got %+v\n", expctdMetricList, returnedMetricList)
- }
-}
-
-func TestCollectMetrics(t *testing.T) {
- var md = &MetricDriver{}
- md.Setup()
- md.cli.RootExecuter = NewMetricFakeExecuter(respMap)
- md.cli.BaseExecuter = NewMetricFakeExecuter(respMap)
- var tempMetricArray []*model.MetricSpec
- for i, volume := range expctedVolList {
- convrtedVolID := convert(volume, expectdVgs[i])
- thismetricMAp := expctedMetricMap[convrtedVolID]
- thisLabelMap := expctedLabelMap[convrtedVolID]
- for _, element := range expctdMetricList {
- val, _ := strconv.ParseFloat(thismetricMAp[element], 64)
-
- expctdmetricValue := &model.Metric{
- Timestamp: 123456,
- Value: val,
- }
- expctdMetricValues := make([]*model.Metric, 0)
- expctdMetricValues = append(expctdMetricValues, expctdmetricValue)
- metric := &model.MetricSpec{
- InstanceID: volume,
- InstanceName: thismetricMAp["InstanceName"],
- Job: "lvm",
- Labels: thisLabelMap,
- Component: "volume",
- Name: element,
- Unit: metricToUnitMap[element],
- AggrType: "",
- MetricValues: expctdMetricValues,
- }
- tempMetricArray = append(tempMetricArray, metric)
- }
- }
- for _, disk := range expctedDiskList {
- convrtedVolID := formatDiskName(disk)
- thismetricMAp := expctedMetricMap[convrtedVolID]
- thisLabelMap := expctedLabelMap[convrtedVolID]
- fmt.Println(thismetricMAp)
- for _, element := range expctdMetricList {
- val, _ := strconv.ParseFloat(thismetricMAp[element], 64)
- expctdmetricValue := &model.Metric{
- Timestamp: 123456,
- Value: val,
- }
- expctdMetricValues := make([]*model.Metric, 0)
- expctdMetricValues = append(expctdMetricValues, expctdmetricValue)
- metric := &model.MetricSpec{
- InstanceID: disk,
- InstanceName: thismetricMAp["InstanceName"],
- Job: "lvm",
- Labels: thisLabelMap,
- Component: "disk",
- Name: element,
- Unit: metricToUnitMap[element],
- AggrType: "",
- MetricValues: expctdMetricValues,
- }
- tempMetricArray = append(tempMetricArray, metric)
- }
- }
- expectedMetrics := tempMetricArray
- retunMetrics, err := md.CollectMetrics()
- if err != nil {
- t.Error("failed to collect stats:", err)
- }
- // we can't use deep equal on metric spec objects as the timesatmp calulation is time.Now() in driver
- // validate equivalence of each metricspec fields against expected except timestamp
- var b bool = true
- for i, m := range expectedMetrics {
- b = b && reflect.DeepEqual(m.InstanceName, retunMetrics[i].InstanceName)
- b = b && reflect.DeepEqual(m.InstanceID, retunMetrics[i].InstanceID)
- b = b && reflect.DeepEqual(m.Job, retunMetrics[i].Job)
- b = b && reflect.DeepEqual(m.Labels, retunMetrics[i].Labels)
- b = b && reflect.DeepEqual(m.Component, retunMetrics[i].Component)
- b = b && reflect.DeepEqual(m.Unit, retunMetrics[i].Unit)
- b = b && reflect.DeepEqual(m.AggrType, retunMetrics[i].AggrType)
- for j, v := range m.MetricValues {
- b = b && reflect.DeepEqual(v.Value, retunMetrics[i].MetricValues[j].Value)
- }
- }
- if !b {
- t.Log("expected metric spec")
- printMetricSpec(expectedMetrics)
- t.Log("returned metric spec")
- printMetricSpec(retunMetrics)
- }
-}
-
-func printMetricSpec(m []*model.MetricSpec) {
- for _, p := range m {
- fmt.Printf("%+v\n", p)
- for _, v := range p.MetricValues {
- fmt.Printf("%+v\n", v)
- }
- }
-
-}
-func Test_getMetricToUnitMap(t *testing.T) {
- tests := []struct {
- name string
- want map[string]string
- }{
- {name: "test1", want: metricToUnitMap},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := getMetricToUnitMap(); !reflect.DeepEqual(got, tt.want) {
- t.Errorf("difference in expected result of getMetricToUnitMap() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-func Test_convert(t *testing.T) {
- type args struct {
- instanceID string
- vg string
- }
- tests := []struct {
- name string
- args args
- want string
- }{
- {name: "test1", args: args{instanceID: "volume-b902e771-8e02-4099-b601-a6b3881f8", vg: "opensds-volumes-default"}, want: "opensds--volumes--default-volume--b902e771--8e02--4099--b601--a6b3881f8"},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := convert(tt.args.instanceID, tt.args.vg); got != tt.want {
- t.Errorf("difference in expected result of convert() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-func Test_formatDiskName(t *testing.T) {
- type args struct {
- instanceID string
- }
- tests := []struct {
- name string
- args args
- want string
- }{
- {name: "test1", args: args{instanceID: "/dev/loop10"}, want: "loop10"},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := formatDiskName(tt.args.instanceID); got != tt.want {
- t.Errorf("difference in expected result of formatDiskName() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/contrib/drivers/lvm/lvm_test.go b/contrib/drivers/lvm/lvm_test.go
deleted file mode 100755
index 33b98696f..000000000
--- a/contrib/drivers/lvm/lvm_test.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lvm
-
-import (
- "fmt"
- "reflect"
- "testing"
-
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils/config"
- "github.com/opensds/opensds/pkg/utils/exec"
-)
-
-var fp = map[string]PoolProperties{
- "vg001": {
- StorageType: "block",
- AvailabilityZone: "default",
- MultiAttach: true,
- Extras: model.StoragePoolExtraSpec{
- DataStorage: model.DataStorageLoS{
- ProvisioningPolicy: "Thin",
- Compression: false,
- Deduplication: false,
- },
- IOConnectivity: model.IOConnectivityLoS{
- AccessProtocol: "iscsi",
- MaxIOPS: 7000000,
- MaxBWS: 600,
- MinIOPS: 1000000,
- MinBWS: 100,
- Latency: 100,
- },
- Advanced: map[string]interface{}{
- "diskType": "SSD",
- "latency": "5ms",
- },
- },
- },
-}
-
-func TestSetup(t *testing.T) {
- var d = &Driver{}
- config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml"
- var expectedDriver = &Driver{
- conf: &LVMConfig{
- Pool: fp,
- TgtBindIp: "192.168.56.105",
- TgtConfDir: "/etc/tgt/conf.d",
- EnableChapAuth: true,
- },
- }
-
- if err := d.Setup(); err != nil {
- t.Errorf("Setup lvm driver failed: %+v\n", err)
- }
- if !reflect.DeepEqual(d.conf, expectedDriver.conf) {
- t.Errorf("Expected %+v, got %+v", expectedDriver.conf, d.conf)
- }
-}
-
-type FakeResp struct {
- out string
- err error
-}
-
-func NewFakeExecuter(respMap map[string]*FakeResp) exec.Executer {
- return &FakeExecuter{RespMap: respMap}
-}
-
-type FakeExecuter struct {
- RespMap map[string]*FakeResp
-}
-
-func (f *FakeExecuter) Run(name string, args ...string) (string, error) {
- var cmd = name
- if name == "env" {
- cmd = args[1]
- }
- v, ok := f.RespMap[cmd]
- if !ok {
- return "", fmt.Errorf("can find specified op: %s", args[1])
- }
- return v.out, v.err
-}
-
-func TestCreateVolume(t *testing.T) {
- var fd = &Driver{}
- config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml"
- fd.Setup()
-
- respMap := map[string]*FakeResp{
- "lvcreate": {"", nil},
- }
- fd.cli.RootExecuter = NewFakeExecuter(respMap)
- fd.cli.BaseExecuter = NewFakeExecuter(respMap)
-
- opt := &pb.CreateVolumeOpts{
- Id: "e1bb066c-5ce7-46eb-9336-25508cee9f71",
- Name: "test001",
- Description: "volume for testing",
- Size: int64(1),
- PoolName: "vg001",
- }
- var expected = &model.VolumeSpec{
- BaseModel: &model.BaseModel{},
- Name: "test001",
- Description: "volume for testing",
- Size: int64(1),
- Identifier: &model.Identifier{DurableName: "61bb066c5ce746eb933625508cee9f71", DurableNameFormat: "NAA"},
- Metadata: map[string]string{
- "lvPath": "/dev/vg001/volume-e1bb066c-5ce7-46eb-9336-25508cee9f71",
- },
- }
- vol, err := fd.CreateVolume(opt)
- if err != nil {
- t.Error("Failed to create volume:", err)
- }
- vol.Id = ""
- if !reflect.DeepEqual(vol, expected) {
- t.Errorf("Expected %+v, got %+v\n", expected, vol)
- }
-}
-
-func TestCreateVolumeFromSnapshot(t *testing.T) {
- var fd = &Driver{}
- config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml"
- fd.Setup()
-
- respMap := map[string]*FakeResp{
- "lvcreate": {"", nil},
- "dd": {"", nil},
- }
- fd.cli.RootExecuter = NewFakeExecuter(respMap)
- fd.cli.BaseExecuter = NewFakeExecuter(respMap)
-
- opt := &pb.CreateVolumeOpts{
- Id: "e1bb066c-5ce7-46eb-9336-25508cee9f71",
- Name: "test001",
- Description: "volume for testing",
- Size: int64(1),
- PoolName: "vg001",
- SnapshotId: "3769855c-a102-11e7-b772-17b880d2f537",
- SnapshotSize: int64(1),
- }
- var expected = &model.VolumeSpec{
- BaseModel: &model.BaseModel{},
- Name: "test001",
- Description: "volume for testing",
- Size: int64(1),
- Identifier: &model.Identifier{DurableName: "61bb066c5ce746eb933625508cee9f71", DurableNameFormat: "NAA"},
- Metadata: map[string]string{
- "lvPath": "/dev/vg001/volume-e1bb066c-5ce7-46eb-9336-25508cee9f71",
- },
- }
- vol, err := fd.CreateVolume(opt)
- if err != nil {
- t.Error("Failed to create volume:", err)
- }
- vol.Id = ""
- if !reflect.DeepEqual(vol, expected) {
- t.Errorf("Expected %+v, got %+v\n", expected, vol)
- }
-}
-
-func TestDeleteVolume(t *testing.T) {
- var fd = &Driver{}
- config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml"
- fd.Setup()
-
- respMap := map[string]*FakeResp{
- "lvdisplay": {"-wi-a-----", nil},
- "lvremove": {"", nil},
- }
- fd.cli.RootExecuter = NewFakeExecuter(respMap)
- fd.cli.BaseExecuter = NewFakeExecuter(respMap)
-
- opt := &pb.DeleteVolumeOpts{
- Metadata: map[string]string{
- "lvPath": "/dev/vg001/test001",
- },
- }
- if err := fd.DeleteVolume(opt); err != nil {
- t.Error("Failed to delete volume:", err)
- }
-}
-
-func TestExtendVolume(t *testing.T) {
- var fd = &Driver{}
- config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml"
- fd.Setup()
-
- respMap := map[string]*FakeResp{
- "lvdisplay": {"-wi-a-----", nil},
- "lvchange": {"", nil},
- "lvextend": {"", nil},
- }
- fd.cli.RootExecuter = NewFakeExecuter(respMap)
- fd.cli.BaseExecuter = NewFakeExecuter(respMap)
-
- opt := &pb.ExtendVolumeOpts{
- Id: "591c43e6-1156-42f5-9fbc-161153da185c",
- Metadata: map[string]string{
- "lvPath": "/dev/vg001/test001",
- },
- Size: int64(1),
- }
-
- vol, err := fd.ExtendVolume(opt)
- if err != nil {
- t.Error("Failed to extend volume:", err)
- }
-
- if vol.Size != 1 {
- t.Errorf("Expected %+v, got %+v\n", 1, vol.Size)
- }
-}
-
-func TestCreateSnapshot(t *testing.T) {
- var fd = &Driver{}
- config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml"
- fd.Setup()
-
- respMap := map[string]*FakeResp{
- "lvcreate": {"-wi-a-----", nil},
- }
- fd.cli.RootExecuter = NewFakeExecuter(respMap)
- fd.cli.BaseExecuter = NewFakeExecuter(respMap)
-
- opt := &pb.CreateVolumeSnapshotOpts{
- Id: "d1916c49-3088-4a40-b6fb-0fda18d074c3",
- Name: "snap001",
- Description: "volume snapshot for testing",
- Size: int64(1),
- VolumeId: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- Metadata: map[string]string{
- "lvPath": "/dev/vg001/test001",
- },
- }
- var expected = &model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{},
- Name: "snap001",
- Description: "volume snapshot for testing",
- Size: int64(1),
- VolumeId: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- Metadata: map[string]string{
- "lvsPath": "/dev/vg001/_snapshot-d1916c49-3088-4a40-b6fb-0fda18d074c3",
- },
- }
- snp, err := fd.CreateSnapshot(opt)
- if err != nil {
- t.Error("Failed to create volume snapshot:", err)
- }
- snp.Id = ""
- snp.Metadata["lvsPath"] = "/dev/vg001/_snapshot-d1916c49-3088-4a40-b6fb-0fda18d074c3"
- if !reflect.DeepEqual(snp, expected) {
- t.Errorf("Expected %+v, got %+v\n", expected, snp)
- }
-}
-
-func TestDeleteSnapshot(t *testing.T) {
- var fd = &Driver{}
- config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml"
- fd.Setup()
-
- lvsResp := ` _snapshot-f0594d2b-ffdf-4947-8380-089f0bc17389
- volume-0e2f4a9e-4a94-4d27-b1b4-83464811605c
- volume-591c43e6-1156-42f5-9fbc-161153da185c
- root
- swap_1
-`
- respMap := map[string]*FakeResp{
- "lvs": {lvsResp, nil},
- "lvdisplay": {"-wi-a-----", nil},
- "lvremove": {"", nil},
- }
- fd.cli.RootExecuter = NewFakeExecuter(respMap)
- fd.cli.BaseExecuter = NewFakeExecuter(respMap)
-
- opt := &pb.DeleteVolumeSnapshotOpts{
- Metadata: map[string]string{
- "lvsPath": "/dev/vg001/snap001",
- },
- }
-
- if err := fd.DeleteSnapshot(opt); err != nil {
- t.Error("Failed to delete volume snapshot:", err)
- }
-}
-
-func TestListPools(t *testing.T) {
- var fd = &Driver{}
- config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml"
- fd.Setup()
-
- var vgsResp = ` vg001 18.00 18.00 ahF6kS-QNOH-X63K-avat-6Kag-XLTo-c9ghQ6
- ubuntu-vg 127.52 0.03 fQbqtg-3vDQ-vk3U-gfsT-50kJ-30pq-OZVSJH
-`
- respMap := map[string]*FakeResp{
- "vgs": {vgsResp, nil},
- }
- fd.cli.RootExecuter = NewFakeExecuter(respMap)
- fd.cli.BaseExecuter = NewFakeExecuter(respMap)
-
- var expected = []*model.StoragePoolSpec{
- {
- BaseModel: &model.BaseModel{},
- Name: "vg001",
- TotalCapacity: int64(18),
- FreeCapacity: int64(18),
- AvailabilityZone: "default",
- StorageType: "block",
- MultiAttach: true,
- Extras: model.StoragePoolExtraSpec{
- DataStorage: model.DataStorageLoS{
- ProvisioningPolicy: "Thin",
- Compression: false,
- Deduplication: false,
- },
- IOConnectivity: model.IOConnectivityLoS{
- AccessProtocol: "iscsi",
- MaxIOPS: 7000000,
- MaxBWS: 600,
- MinIOPS: 1000000,
- MinBWS: 100,
- Latency: 100,
- },
- Advanced: map[string]interface{}{
- "diskType": "SSD",
- "latency": "5ms",
- },
- },
- },
- }
-
- pols, err := fd.ListPools()
- if err != nil {
- t.Error("Failed to list pools:", err)
- }
- for i := range pols {
- pols[i].Id = ""
- }
- if !reflect.DeepEqual(pols, expected) {
- t.Errorf("Expected %+v, got %+v\n", expected[0], pols[0])
- }
-}
diff --git a/contrib/drivers/lvm/metrics_cli.go b/contrib/drivers/lvm/metrics_cli.go
deleted file mode 100644
index 5e567160c..000000000
--- a/contrib/drivers/lvm/metrics_cli.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lvm
-
-import (
- "regexp"
- "strings"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/utils/exec"
-)
-
-const (
- sarNotEnabledOut = "Please check if data collecting is enabled"
- cmdNotFound = "No such file or directory"
- sarNotFound = "Command 'sar' not found"
- iostatNotFound = "Command 'iostat' not found"
-)
-
-type MetricCli struct {
- // Command executer
- BaseExecuter exec.Executer
- // Command Root executer
- RootExecuter exec.Executer
-}
-
-func NewMetricCli() (*MetricCli, error) {
- return &MetricCli{
- BaseExecuter: exec.NewBaseExecuter(),
- RootExecuter: exec.NewRootExecuter(),
- }, nil
-}
-
-func (c *MetricCli) execute(cmd ...string) (string, error) {
- return c.RootExecuter.Run(cmd[0], cmd[1:]...)
-}
-
-func isSarEnabled(out string) bool {
-
- if strings.Contains(string(out), sarNotEnabledOut) || strings.Contains(string(out), cmdNotFound) || strings.Contains(string(out), sarNotFound) {
-
- return false
- }
-
- return true
-}
-
-// Function to parse sar and iostat command output
-// metricList -> metrics to be collected
-// metricMap -> metric to command output column mapping
-// out -> command output
-// returnMap -> metric to value map to be returned
-func (c *MetricCli) parseCommandOutput(metricList []string, returnMap map[string]map[string]string, labelMap map[string]map[string]string, metricMap map[string]int, out string) {
-
- tableRows := strings.Split(string(out), "\n")
- for _, row := range tableRows[3:] {
- if row != "" {
- tokens := regexp.MustCompile(" ")
- cols := tokens.Split(row, -1)
- // remove all empty space
- var columns = make([]string, 0, 0)
- for _, v := range cols {
- if v != "" {
- columns = append(columns, v)
- }
- }
- // map the values
- aVolMap := make(map[string]string)
- aLabMap := make(map[string]string)
- for _, metric := range metricList {
- val := columns[metricMap[metric]]
- aVolMap[metric] = val
- aVolMap["InstanceName"] = columns[metricMap["InstanceID"]]
- aLabMap["device"] = columns[metricMap["InstanceID"]]
- }
- returnMap[columns[1]] = aVolMap
- labelMap[columns[1]] = aLabMap
- }
- }
-
-}
-
-// CollectMetrics function is to call the cli for metrics collection. This will be invoked by lvm metric driver
-// metricList -> metrics to be collected
-// returnMap -> metrics to value map
-func (cli *MetricCli) CollectMetrics(metricList []string) ( /*returnMAp*/ map[string]map[string]string /*labelMap*/, map[string]map[string]string, error) {
-
- returnMap := make(map[string]map[string]string)
- labelMap := make(map[string]map[string]string)
- var err error
-
- cmd := []string{"env", "LC_ALL=C", "sar", "-dp", "1", "1"}
- out, err := cli.execute(cmd...)
- if err != nil {
- log.Errorf("cmd.Run() failed with %s\n", err)
- err = nil
-
- }
- //check whether sar collection is enabled ?
- //If not use iostat command
- if isSarEnabled(out) {
- // sar command output mapping
- metricMap := make(map[string]int)
- metricMap["InstanceID"] = 1
- metricMap["iops"] = 2
- metricMap["read_throughput"] = 3
- metricMap["write_throughput"] = 4
- metricMap["response_time"] = 7
- metricMap["service_time"] = 8
- metricMap["utilization"] = 9
- //call parser
- cli.parseCommandOutput(metricList, returnMap, labelMap, metricMap, out)
- } else {
- cmd := []string{"env", "LC_ALL=C", "iostat", "-N"}
- out, err := cli.execute(cmd...)
- if strings.Contains(string(out), cmdNotFound) || strings.Contains(string(out), iostatNotFound) {
- log.Errorf("iostat is not available: cmd.Run() failed with %s\n", err)
- return nil, nil, err
- } else if err != nil {
- log.Errorf("cmd.Run() failed with %s\n", err)
- return nil, nil, err
- }
- metricMap := make(map[string]int)
- // iostat command output mapping
- metricMap["iops"] = 1
- metricMap["read_throughput"] = 2
- metricMap["write_throughput"] = 3
- cli.parseCommandOutput(metricList, returnMap, labelMap, metricMap, out)
-
- }
- return returnMap, labelMap, err
-}
-
-// Discover LVM volumes
-func (c *MetricCli) DiscoverVolumes() ([]string, []string, error) {
- cmd := []string{"env", "LC_ALL=C", "lvs"}
- out, err := c.execute(cmd...)
- tableRows := strings.Split(string(out), "\n")
- var volumes []string
- var vgs []string
- for _, row := range tableRows[1:] {
- if row != "" {
- tokens := regexp.MustCompile(" ")
- cols := tokens.Split(row, -1)
- var columns = make([]string, 0, 0)
- for _, v := range cols {
- if v != "" {
- columns = append(columns, v)
- }
- }
- volumes = append(volumes, columns[0])
- vgs = append(vgs, columns[1])
-
- }
- }
- return volumes, vgs, err
-}
-
-// Discover LVM Disks
-func (c *MetricCli) DiscoverDisks() ([]string, error) {
- cmd := []string{"env", "LC_ALL=C", "pvs"}
- out, err := c.execute(cmd...)
- tableRows := strings.Split(string(out), "\n")
- var volumes []string
- for _, row := range tableRows[1:] {
- if row != "" {
- tokens := regexp.MustCompile(" ")
- cols := tokens.Split(row, -1)
- var columns = make([]string, 0, 0)
- for _, v := range cols {
- if v != "" {
- columns = append(columns, v)
- }
- }
- volumes = append(volumes, columns[0])
-
- }
- }
- return volumes, err
-}
diff --git a/contrib/drivers/lvm/metrics_cli_test.go b/contrib/drivers/lvm/metrics_cli_test.go
deleted file mode 100644
index ea0398593..000000000
--- a/contrib/drivers/lvm/metrics_cli_test.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lvm
-
-import (
- "reflect"
- "testing"
-
- "github.com/opensds/opensds/pkg/utils/exec"
-)
-
-func TestNewMetricCli(t *testing.T) {
- tests := []struct {
- name string
- want *MetricCli
- wantErr bool
- }{
- {name: "test1", want: &MetricCli{
- BaseExecuter: exec.NewBaseExecuter(),
- RootExecuter: exec.NewRootExecuter(),
- }, wantErr: false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := NewMetricCli()
- if (err != nil) != tt.wantErr {
- t.Errorf("unexpected error output for NewMetricCli() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("unexpected cli output for NewMetricCli() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestMetricCli_execute(t *testing.T) {
- type fields struct {
- BaseExecuter exec.Executer
- RootExecuter exec.Executer
- }
- type args struct {
- cmd []string
- }
- tests := []struct {
- name string
- fields fields
- args args
- want string
- wantErr bool
- }{
- {name: "test1", fields: fields{}, args: args{}, want: "env: ‘error_cmd’: No such file or directory\n", wantErr: true},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- c, _ := NewMetricCli()
- got, err := c.execute("env", "LC_ALL=C", "error_cmd")
- if (err != nil) != tt.wantErr {
- t.Errorf("difference in expected result of MetricCli.execute() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if got != tt.want {
- t.Errorf("difference in expected result of of MetricCli.execute() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func Test_isSarEnabled(t *testing.T) {
- type args struct {
- out string
- }
- tests := []struct {
- name string
- args args
- want bool
- }{
- {name: "test1", args: args{out: "Please check if data collecting is enabled"}, want: false},
- {name: "test1", args: args{out: "Command 'sar' not found"}, want: false},
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := isSarEnabled(tt.args.out); got != tt.want {
- t.Errorf("difference in expected result of expected isSarEnabled() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestMetricCli_parseCommandOutput(t *testing.T) {
-
- type fields struct {
- BaseExecuter exec.Executer
- RootExecuter exec.Executer
- }
- type args struct {
- metricList []string
-
- out *MetricFakeResp
- }
- tests := []struct {
- name string
- fields fields
- args args
- }{
- {name: "test1", fields: fields{}, args: args{metricList: expctdMetricList, out: respMap["sar"]}},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- returnMap := make(map[string]map[string]string)
- labelMap := make(map[string]map[string]string)
- metricMap := make(map[string]int)
- c := &MetricCli{
- BaseExecuter: tt.fields.BaseExecuter,
- RootExecuter: tt.fields.RootExecuter,
- }
- c.parseCommandOutput(tt.args.metricList, returnMap, labelMap, metricMap, tt.args.out.out)
- })
- }
-}
-
-func TestMetricCli_CollectMetrics(t *testing.T) {
- type fields struct {
- BaseExecuter exec.Executer
- RootExecuter exec.Executer
- }
- type args struct {
- metricList []string
- }
- var expctedLabelMap map[string]map[string]string = map[string]map[string]string{"loop2": map[string]string{"device": "loop2"}, "loop3": map[string]string{"device": "loop3"}, "loop4": map[string]string{"device": "loop4"}, "loop5": map[string]string{"device": "loop5"}, "loop6": map[string]string{"device": "loop6"}, "loop7": map[string]string{"device": "loop7"}, "loop9": map[string]string{"device": "loop9"}, "loop10": map[string]string{"device": "loop10"}, "opensds--volumes--default-volume-d96cc42b-b285-474e-aa98-c61e66df7461": map[string]string{"device": "opensds--volumes--default-volume-d96cc42b-b285-474e-aa98-c61e66df7461"}, "opensds--volumes--default-volume--b902e771--8e02--4099--b601--a6b3881f8": map[string]string{"device": "opensds--volumes--default-volume--b902e771--8e02--4099--b601--a6b3881f8"}}
-
- tests := []struct {
- name string
- fields fields
- args args
- want map[string]map[string]string
- wantErr bool
- }{
- {name: "test1", fields: fields{}, args: args{metricList: expctdMetricList}, want: expctedLabelMap, wantErr: false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- cli := &MetricCli{
- BaseExecuter: NewMetricFakeExecuter(respMap),
- RootExecuter: NewMetricFakeExecuter(respMap),
- }
- _, got1, err := cli.CollectMetrics(tt.args.metricList)
- if (err != nil) != tt.wantErr {
- t.Errorf("difference in expected result of MetricCli.CollectMetrics() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got1, tt.want) {
- t.Errorf("difference in expected result of MetricCli.CollectMetrics() got = %v, want %v", got1, tt.want)
- }
-
- })
- }
-}
-
-func TestMetricCli_DiscoverVolumes(t *testing.T) {
- type fields struct {
- BaseExecuter exec.Executer
- RootExecuter exec.Executer
- }
- tests := []struct {
- name string
- fields fields
- want []string
- want1 []string
- wantErr bool
- }{
- {name: "test1", fields: fields{}, want: expctedVolList, want1: expectdVgs},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- c := &MetricCli{
- BaseExecuter: NewMetricFakeExecuter(respMap),
- RootExecuter: NewMetricFakeExecuter(respMap),
- }
- got, got1, err := c.DiscoverVolumes()
- if (err != nil) != tt.wantErr {
- t.Errorf("difference in expected result of MetricCli.DiscoverVolumes() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("difference in expected result of MetricCli.DiscoverVolumes() got = %v, want %v", got, tt.want)
- }
- if !reflect.DeepEqual(got1, tt.want1) {
- t.Errorf("difference in expected result of MetricCli.DiscoverVolumes() got1 = %v, want %v", got1, tt.want1)
- }
- })
- }
-}
-
-func TestMetricCli_DiscoverDisks(t *testing.T) {
- type fields struct {
- BaseExecuter exec.Executer
- RootExecuter exec.Executer
- }
- tests := []struct {
- name string
- fields fields
- want []string
- wantErr bool
- }{
- {name: "test1", fields: fields{}, want: expctedDiskList, wantErr: false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- c := &MetricCli{
- BaseExecuter: NewMetricFakeExecuter(respMap),
- RootExecuter: NewMetricFakeExecuter(respMap),
- }
- got, err := c.DiscoverDisks()
- if (err != nil) != tt.wantErr {
- t.Errorf("difference in expected result of MetricCli.DiscoverDisks() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("difference in expected result of MetricCli.DiscoverDisks() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/contrib/drivers/lvm/targets/iscsi.go b/contrib/drivers/lvm/targets/iscsi.go
deleted file mode 100755
index 7b56ee701..000000000
--- a/contrib/drivers/lvm/targets/iscsi.go
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package targets
-
-import (
- "errors"
- "fmt"
- "io/ioutil"
- "net"
- "os"
- "os/exec"
- "strconv"
- "strings"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/utils"
-)
-
-const (
- opensdsPrefix = "opensds-"
- tgtAdminCmd = "tgt-admin"
-)
-
-type ISCSITarget interface {
- CreateISCSITarget(volId, tgtIqn, path, hostIp, initiator string, chapAuth []string) error
- GetISCSITarget(iqn string) int
- RemoveISCSITarget(volId, iqn, hostIp string) error
- GetLun(path string) int
-}
-
-func NewISCSITarget(bip, tgtConfDir string) ISCSITarget {
- return &tgtTarget{
- TgtConfDir: tgtConfDir,
- BindIp: bip,
- }
-}
-
-type tgtTarget struct {
- BindIp string
- TgtConfDir string
-}
-
-func (t *tgtTarget) GetLun(path string) int {
- out, err := t.execCmd(tgtAdminCmd, "--show")
- if err != nil {
- log.Errorf("Fail to exec '%s' to display iscsi target:%v", tgtAdminCmd, err)
- return -1
- }
-
- var lun = -1
- var lines = strings.Split(out, "\n")
- for num, line := range lines {
- if strings.Contains(line, path) {
- for i := 1; i < num; i++ {
- if strings.Contains(lines[num-i], "LUN") {
- lunString := strings.Fields(lines[num-i])[1]
- lun, err = strconv.Atoi(lunString)
- if err != nil {
- return -1
- }
- return lun
- }
- }
- }
- }
- log.Info("Got lun id:", lun)
-
- return -1
-}
-
-func (t *tgtTarget) getTgtConfPath(volId string) string {
- return t.TgtConfDir + "/" + opensdsPrefix + volId + ".conf"
-}
-
-type configMap map[string][]string
-
-func CreateScsiIDFromVolID(volID string) string {
- //Construct a 32 digit NAA.6 ( Network Addressing Authority) Identifier for the volume.
- scsi_id := strings.Replace(volID, "-", "", -1)
- out := []rune(scsi_id)
- // Make the first digit 6 , which specifies the IEEE registerd extended format for WWN.
- out[0] = '6'
- return string(out)
-}
-func (t *tgtTarget) CreateISCSITarget(volId, tgtIqn, path, hostIp, initiator string, chapAuth []string) error {
- // Multi-attach require a specific ip
- if hostIp == "" || hostIp == "ALL" {
- msg := fmt.Sprintf("create ISCSI target failed: host ip %s cannot be empty or ALL, iscsi only allows specific ip access, not all", hostIp)
- log.Error(msg)
- return errors.New(msg)
- }
-
- result := net.ParseIP(hostIp)
- if result == nil {
- msg := fmt.Sprintf("%s is not a valid ip, please give the proper ip", hostIp)
- log.Error(msg)
- return errors.New(msg)
- }
-
- if exist, _ := utils.PathExists(t.TgtConfDir); !exist {
- os.MkdirAll(t.TgtConfDir, 0755)
- }
-
- config := make(configMap)
-
- configFile := t.getTgtConfPath(volId)
- scsiID := CreateScsiIDFromVolID(volId)
-
- if IsExist(configFile) {
- data, err := ioutil.ReadFile(configFile)
- if err != nil {
- return err
- }
- config.parse(string(data))
- }
-
- var charStr string
- if len(chapAuth) != 0 {
- charStr = fmt.Sprintf("%s %s", chapAuth[0], chapAuth[1])
- config.updateConfigmap("incominguser", charStr)
- }
-
- config.updateConfigmap("initiator-address", hostIp)
- config.updateConfigmap("driver", "iscsi")
- config.updateConfigmap("backing-store", path)
- config.updateConfigmap("write-cache", "on")
- config.updateConfigmap("scsi_id", scsiID)
-
- err := config.writeConfig(configFile, tgtIqn)
- if err != nil {
- log.Errorf("failed to update config file %s %v", t.getTgtConfPath(volId), err)
- return err
- }
-
- if info, err := t.execCmd(tgtAdminCmd, "--force", "--update", tgtIqn); err != nil {
- log.Errorf("Fail to exec '%s' to create iscsi target, %s,%v", tgtAdminCmd, string(info), err)
- return err
- }
-
- if t.GetISCSITarget(tgtIqn) == -1 {
- log.Errorf("Failed to create iscsi target for Volume "+
- "ID: %s. It could be caused by problem "+
- "with concurrency. "+
- "Also please ensure your tgtd config "+
- "file contains 'include %s/*'",
- volId, t.TgtConfDir)
- return fmt.Errorf("failed to create volume(%s) attachment", volId)
- }
- return nil
-}
-
-func (t *tgtTarget) GetISCSITarget(iqn string) int {
- out, err := t.execCmd(tgtAdminCmd, "--show")
- if err != nil {
- log.Errorf("Fail to exec '%s' to display iscsi target:%v", tgtAdminCmd, err)
- return -1
- }
-
- var tid = -1
- for _, line := range strings.Split(out, "\n") {
- if strings.Contains(line, iqn) {
- tidString := strings.Fields(strings.Split(line, ":")[0])[1]
- tid, err = strconv.Atoi(tidString)
- if err != nil {
- return -1
- }
- break
- }
- }
- return tid
-}
-
-func (t *tgtTarget) RemoveISCSITarget(volId, iqn, hostIp string) error {
- if hostIp == "" {
- return errors.New("remove ISCSI target failed, host ip cannot be empty")
- }
-
- tgtConfPath := t.getTgtConfPath(volId)
- if exist, _ := utils.PathExists(tgtConfPath); !exist {
- log.Warningf("Volume path %s does not exist, nothing to remove.", tgtConfPath)
- return nil
- }
-
- config := make(configMap)
-
- data, err := ioutil.ReadFile(tgtConfPath)
- if err != nil {
- return err
- }
-
- config.parse(string(data))
-
- ips := config["initiator-address"]
- for i, v := range ips {
- if v == hostIp {
- ips = append(ips[:i], ips[i+1:]...)
- break
- }
- }
- config["initiator-address"] = ips
- if len(ips) == 0 {
- if info, err := t.execCmd(tgtAdminCmd, "--force", "--delete", iqn); err != nil {
- log.Errorf("Fail to exec '%s' to forcely remove iscsi target, %s, %v",
- tgtAdminCmd, string(info), err)
- return err
- }
-
- os.Remove(tgtConfPath)
- } else {
- err := config.writeConfig(t.getTgtConfPath(volId), iqn)
- if err != nil {
- log.Errorf("failed to update config file %s %v", t.getTgtConfPath(volId), err)
- return err
- }
-
- if info, err := t.execCmd(tgtAdminCmd, "--force", "--update", iqn); err != nil {
- log.Errorf("Fail to exec '%s' to create iscsi target, %s,%v", tgtAdminCmd, string(info), err)
- return err
- }
-
- if t.GetISCSITarget(iqn) == -1 {
- log.Errorf("Failed to create iscsi target for Volume "+
- "ID: %s. It could be caused by problem "+
- "with concurrency. "+
- "Also please ensure your tgtd config "+
- "file contains 'include %s/*'",
- volId, t.TgtConfDir)
- return fmt.Errorf("failed to create volume(%s) attachment", volId)
- }
- }
-
- return nil
-}
-
-func (*tgtTarget) execCmd(name string, cmd ...string) (string, error) {
- ret, err := exec.Command(name, cmd...).Output()
- log.Infoln("Command:", cmd, strings.Join(cmd, " "))
- log.V(8).Infof("result:%s", string(ret))
- if err != nil {
- log.Error("error info:", err)
- }
- return string(ret), err
-}
-
-func (m *configMap) parse(data string) {
- var lines = strings.Split(data, "\n")
-
- for _, line := range lines {
- for _, key := range []string{"backing-store", "driver", "initiator-address", "write-cache"} {
- if strings.Contains(line, key) {
- s := strings.TrimSpace(line)
- if (*m)[key] == nil {
- (*m)[key] = []string{strings.Split(s, " ")[1]}
- } else {
- (*m)[key] = append((*m)[key], strings.Split(s, " ")[1])
- }
- }
- }
- }
-}
-
-func (m *configMap) updateConfigmap(key, value string) {
- v := (*m)[key]
- if v == nil {
- (*m)[key] = []string{value}
- } else {
- if !utils.Contains(v, value) {
- v = append(v, value)
- (*m)[key] = v
- }
- }
-}
-
-func (m configMap) writeConfig(file, tgtIqn string) error {
- f, err := os.Create(file)
- if err != nil {
- return err
- }
- defer f.Close()
-
- f.WriteString(fmt.Sprintf("\n", tgtIqn))
- for k, v := range m {
- for _, vl := range v {
- f.WriteString(fmt.Sprintf(" %s %s\n", k, vl))
- }
- }
- f.WriteString("")
- f.Sync()
- return nil
-}
-
-func IsExist(f string) bool {
- _, err := os.Stat(f)
- return err == nil || os.IsExist(err)
-}
diff --git a/contrib/drivers/lvm/targets/nvmeof.go b/contrib/drivers/lvm/targets/nvmeof.go
deleted file mode 100755
index 079e0c831..000000000
--- a/contrib/drivers/lvm/targets/nvmeof.go
+++ /dev/null
@@ -1,422 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package targets
-
-import (
- "bytes"
- "errors"
- "io"
- "os"
- "os/exec"
- "strings"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/utils"
-)
-
-const (
- opensdsNvmeofPrefix = "opensds-Nvmeof"
- NvmetDir = "/sys/kernel/config/nvmet"
-)
-
-type NvmeofTarget interface {
- AddNvmeofSubsystem(volId, tgtNqn, path, initiator string) (string, error)
- RemoveNvmeofSubsystem(volId, nqn string) error
- GetNvmeofSubsystem(nqn string) (string, error)
- CreateNvmeofTarget(volId, tgtIqn, path, initiator, transtype string) error
- GetNvmeofTarget(nqn, transtype string) (bool, error)
- RemoveNvmeofTarget(volId, nqn, transtype string) error
-}
-
-func NewNvmeofTarget(bip, tgtConfDir string) NvmeofTarget {
- return &NvmeoftgtTarget{
- TgtConfDir: tgtConfDir,
- BindIp: bip,
- }
-}
-
-type NvmeoftgtTarget struct {
- BindIp string
- TgtConfDir string
-}
-
-func (t *NvmeoftgtTarget) init() {
- t.execCmd("modprobe", "nvmet")
- t.execCmd("modprobe", "nvmet-rdma")
- t.execCmd("modprobe", "nvmet-tcp")
- t.execCmd("modprobe", "nvmet-fc")
-}
-
-func (t *NvmeoftgtTarget) getTgtConfPath(volId string) string {
- return NvmetDir + "/" + opensdsNvmeofPrefix + volId
-}
-
-func (t *NvmeoftgtTarget) convertTranstype(transtype string) string {
- var portid string
- switch transtype {
- case "fc":
- portid = "3"
- case "rdma":
- portid = "2"
- default:
- portid = "1"
- log.Infof("default nvmeof transtype : tcp")
- }
- return portid
-}
-
-func (t *NvmeoftgtTarget) AddNvmeofSubsystem(volId, tgtNqn, path, initiator string) (string, error) {
- if exist, _ := utils.PathExists(NvmetDir); !exist {
- os.MkdirAll(NvmetDir, 0755)
- }
- sysdir := NvmetDir + "/subsystems/" + tgtNqn
- if exist, _ := utils.PathExists(sysdir); !exist {
- os.MkdirAll(sysdir, 0755)
- }
-
- var err error
- if initiator == "ALL" {
- // echo 1 > attr_allow_any_host
- attrfile := sysdir + "/attr_allow_any_host"
- content := "1"
- err = t.WriteWithIo(attrfile, content)
- if err != nil {
- log.Errorf("can not set attr_allow_any_host ")
- t.RemoveNvmeofSubsystem(volId, tgtNqn)
- return "", err
- }
- } else {
- // allow specific initiators to connect to this target
- var initiatorInfo = initiator
- hostpath := NvmetDir + "/hosts"
- if exist, _ := utils.PathExists(hostpath); !exist {
- os.MkdirAll(hostpath, 0755)
- }
-
- hostDir := hostpath + "/" + initiatorInfo
- if exist, _ := utils.PathExists(hostDir); !exist {
- os.MkdirAll(hostDir, 0755)
- }
- // create symbolic link of host
- hostsys := sysdir + "/allowed_hosts/"
- _, err = t.execCmd("ln", "-s", hostDir, hostsys)
- if err != nil {
- log.Errorf("Fail to create host link: " + initiatorInfo)
- t.RemoveNvmeofSubsystem(volId, tgtNqn)
- return "", err
- }
- }
-
- // get volume namespaceid
- namespaceid := t.Getnamespaceid(volId)
- if namespaceid == "" {
- t.RemoveNvmeofSubsystem(volId, tgtNqn)
- return "", errors.New("null namesapce")
- }
- namespace := sysdir + "/namespaces/" + namespaceid
- if exist, _ := utils.PathExists(namespace); !exist {
- os.MkdirAll(namespace, 0755)
- }
-
- // volid as device path
- devpath := namespace + "/device_path"
- err = t.WriteWithIo(devpath, path)
- if err != nil {
- log.Errorf("Fail to set device path")
- t.RemoveNvmeofSubsystem(volId, tgtNqn)
- return "", err
- }
-
- enablepath := namespace + "/enable"
- err = t.WriteWithIo(enablepath, "1")
- if err != nil {
- log.Errorf("Fail to set device path")
- t.RemoveNvmeofSubsystem(volId, tgtNqn)
- return "", err
- }
- log.Infof("new added subsys : %s", sysdir)
- return sysdir, nil
-}
-
-func (t *NvmeoftgtTarget) GetNvmeofSubsystem(nqn string) (string, error) {
- subsysdir := NvmetDir + "/subsystems/" + nqn
- if _, err := os.Stat(subsysdir); err == nil {
- return subsysdir, nil
-
- } else if os.IsNotExist(err) {
- return "", nil
-
- } else {
- log.Errorf("can not get nvmeof subsystem")
- return "", err
- }
-
-}
-
-func (t *NvmeoftgtTarget) CreateNvmeofTarget(volId, tgtNqn, path, initiator, transtype string) error {
-
- if tgtexisted, err := t.GetNvmeofTarget(tgtNqn, transtype); tgtexisted == true && err == nil {
- log.Infof("Nvmeof target %s with transtype %s has existed", tgtNqn, transtype)
- return nil
- } else if err != nil {
- log.Errorf("can not get nvmeof target %s with transport type %s", tgtNqn, transtype)
- return err
- }
-
- var subexisted string
- subexisted, err := t.GetNvmeofSubsystem(tgtNqn)
- if err != nil {
- log.Errorf("can not get nvmeof subsystem %s ", tgtNqn)
- return err
- } else if subexisted == "" {
- log.Infof("add new nqn subsystem %s", tgtNqn)
- subexisted, err = t.AddNvmeofSubsystem(volId, tgtNqn, path, initiator)
- log.Infof("new subdir: %s", subexisted)
- } else {
- log.Infof("%s subsystem has existed", tgtNqn)
- }
-
- subexisted = NvmetDir + "/subsystems/" + tgtNqn
- log.Infof("new subdir: %s", subexisted)
- // subexisted, err = t.GetNvmeofSubsystem(tgtNqn)
- // log.Infof("new subdir: %s ", subexisted)
- // if subexisted == "" {
- // log.Infof("still no subsystem after add new subsystem")
- // //t.RemoveNvmeofSubsystem(volId, tgtNqn)
- // return errors.New("still can not get subsystem after add new one")
- // }
- //
- //create port
- portid := t.convertTranstype(transtype)
- portspath := NvmetDir + "/ports/" + portid
- if exist, _ := utils.PathExists(portspath); !exist {
- //log.Errorf(portspath)
- os.MkdirAll(portspath, 0755)
- }
-
- // get target ip
- // here the ip should be the ip interface of the specific nic
- // for example, if transport type is rdma, then the rdma ip should be used.
- // here just set the generic ip address since tcp is the default choice.
- ippath := portspath + "/addr_traddr"
- ip, err := t.execCmd("hostname", "-I")
- if err != nil {
- log.Errorf("fail to get target ipv4 address")
- t.RemoveNvmeofTarget(volId, tgtNqn, transtype)
- return err
- }
-
- ip = strings.Split(ip, " ")[0]
- err = t.WriteWithIo(ippath, ip)
- if err != nil {
- log.Errorf("Fail to set target ip")
- t.RemoveNvmeofTarget(volId, tgtNqn, transtype)
- return err
- }
-
- trtypepath := portspath + "/addr_trtype"
- err = t.WriteWithIo(trtypepath, transtype)
- if err != nil {
- log.Errorf("Fail to set transport type")
- t.RemoveNvmeofTarget(volId, tgtNqn, transtype)
- return err
- }
-
- trsvcidpath := portspath + "/addr_trsvcid"
- err = t.WriteWithIo(trsvcidpath, "4420")
- if err != nil {
- log.Errorf("Fail to set ip port")
- t.RemoveNvmeofTarget(volId, tgtNqn, transtype)
- return err
- }
-
- adrfampath := portspath + "/addr_adrfam"
- err = t.WriteWithIo(adrfampath, "ipv4")
- if err != nil {
- log.Errorf("Fail to set ip family")
- t.RemoveNvmeofTarget(volId, tgtNqn, transtype)
- return err
- }
-
- // create a soft link
- portssub := portspath + "/subsystems/" + tgtNqn
- _, err = t.execCmd("ln", "-s", subexisted, portssub)
- if err != nil {
- log.Errorf("Fail to create link")
- t.RemoveNvmeofTarget(volId, tgtNqn, transtype)
- return err
- }
-
- // check
- info, err := t.execBash("dmesg | grep 'enabling port' ")
- if err != nil || info == "" {
- log.Errorf("nvme target is not listening on the port")
- t.RemoveNvmeofTarget(volId, tgtNqn, transtype)
- return err
- }
- log.Info("create nvme target")
- return nil
-}
-
-func (t *NvmeoftgtTarget) GetNvmeofTarget(nqn, transtype string) (bool, error) {
- portid := t.convertTranstype(transtype)
-
- targetlinkpath := NvmetDir + "/ports/" + portid + "/subsystems/" + nqn
- if _, err := os.Lstat(targetlinkpath); err == nil {
- return true, nil
-
- } else if os.IsNotExist(err) {
- return false, nil
-
- } else {
- log.Errorf("can not get nvmeof target")
- return false, err
- }
-
-}
-
-func (t *NvmeoftgtTarget) RemoveNvmeofSubsystem(volId, nqn string) error {
- log.Info("removing subsystem", nqn)
- tgtConfPath := NvmetDir + "/subsystems/" + nqn
- if exist, _ := utils.PathExists(tgtConfPath); !exist {
- log.Warningf("Volume path %s does not exist, nothing to remove.", tgtConfPath)
- return nil
- }
-
- // remove namespace, whether it succeed or not, the removement should be executed.
- ns := t.Getnamespaceid(volId)
- if ns == "" {
- log.Infof("can not find volume %s's namespace", volId)
- // return errors.New("null namespace")
- }
- naspPath := NvmetDir + "/subsystems/" + nqn + "/namespaces/" + ns
- info, err := t.execCmd("rmdir", naspPath)
- if err != nil {
- log.Infof("can not rm nasp")
- // return err
- }
-
- // remove namespaces ; if it allows all initiators ,then this dir should be empty
- // if it allow specific hosts ,then here remove all the hosts
- cmd := "rm -f " + NvmetDir + "/subsystems/" + nqn + "/allowed_hosts/" + "*"
- info, err = t.execBash(cmd)
- if err != nil {
- log.Infof("can not rm allowed hosts")
- log.Infof(info)
- // return err
- }
-
- // remove subsystem
- syspath := NvmetDir + "/subsystems/" + nqn
- info, err = t.execCmd("rmdir", syspath)
- if err != nil {
- log.Infof("can not rm subsys")
- return err
- }
- return nil
-}
-
-func (t *NvmeoftgtTarget) RemoveNvmeofPort(nqn, transtype string) error {
- log.Infof("removing nvmeof port %s", transtype)
- portid := t.convertTranstype(transtype)
-
- portpath := NvmetDir + "/ports/" + portid + "/subsystems/" + nqn
-
- // port's link has to be removed first or the subsystem cannot be removed
- tgtConfPath := NvmetDir + "/subsystems/" + nqn
- if exist, _ := utils.PathExists(tgtConfPath); !exist {
- log.Warningf("Volume path %s does not exist, nothing to remove.", tgtConfPath)
- return nil
- }
-
- info, err := t.execCmd("rm", "-f", portpath)
- if err != nil {
- log.Errorf("can not rm nvme port transtype: %s, nqn: %s", transtype, nqn)
- log.Errorf(info)
- return err
- }
- return nil
-}
-
-func (t *NvmeoftgtTarget) RemoveNvmeofTarget(volId, nqn, transtype string) error {
- log.Infof("removing target %s", nqn)
- if tgtexisted, err := t.GetNvmeofTarget(nqn, transtype); err != nil {
- log.Errorf("can not get nvmeof target %s with type %s", nqn, transtype)
- return err
- } else if tgtexisted == false {
- log.Infof("nvmeof target %s with type %s does not exist", nqn, transtype)
- } else {
- err = t.RemoveNvmeofPort(nqn, transtype)
- if err != nil {
- return err
- }
- }
-
- if subexisted, err := t.GetNvmeofSubsystem(nqn); err != nil {
- log.Errorf("can not get nvmeof subsystem %s ", nqn)
- return err
- } else if subexisted == "" {
- log.Errorf("subsystem %s does not exist", nqn)
- return nil
- } else {
- err = t.RemoveNvmeofSubsystem(volId, nqn)
- if err != nil {
- log.Errorf("can not remove nvme subsystem %s", nqn)
- return err
- }
- }
- return nil
-}
-
-func (*NvmeoftgtTarget) execCmd(name string, cmd ...string) (string, error) {
- ret, err := exec.Command(name, cmd...).Output()
- if err != nil {
- log.Errorf("error info: %v", err)
- }
- return string(ret), err
-}
-
-func (*NvmeoftgtTarget) execBash(name string) (string, error) {
- ret, err := exec.Command("/bin/sh", "-c", name).Output()
- if err != nil {
- log.Errorf("error info in sh %v ", err)
- }
- return string(ret), err
-}
-
-func (*NvmeoftgtTarget) WriteWithIo(name, content string) error {
- fileObj, err := os.OpenFile(name, os.O_RDWR, 0644)
- if err != nil {
- log.Errorf("Failed to open the file %v", err)
- return err
- }
- if _, err := io.WriteString(fileObj, content); err == nil {
- log.Infof("Successful appending to the file with os.OpenFile and io.WriteString.%s", content)
- return nil
- }
- return err
-}
-
-func (t *NvmeoftgtTarget) Getnamespaceid(volId string) string {
- var buffer bytes.Buffer
- for _, rune := range volId {
- // nvme target namespace dir should not be like 00 or 0 ,
- // so only digits range from 1 to 9 are accepted
- if rune >= '1' && rune <= '9' {
- buffer.WriteRune(rune)
- }
- }
- return buffer.String()[0:2]
-}
diff --git a/contrib/drivers/lvm/targets/targets.go b/contrib/drivers/lvm/targets/targets.go
deleted file mode 100755
index 6b3cf04ef..000000000
--- a/contrib/drivers/lvm/targets/targets.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package targets
-
-import "github.com/opensds/opensds/contrib/drivers/utils/config"
-
-const (
- iscsiTgtPrefix = "iqn.2017-10.io.opensds:"
- nvmeofTgtPrefix = "nqn.2019-01.com.opensds:nvme:"
-)
-
-// Target is an interface for exposing some operations of different targets,
-// currently support iscsiTarget.
-type Target interface {
- CreateExport(volId, path, hostIp, initiator string, chapAuth []string) (map[string]interface{}, error)
-
- RemoveExport(volId, hostIp string) error
-}
-
-// NewTarget method creates a new target based on its type.
-func NewTarget(bip string, tgtConfDir string, access string) Target {
- switch access {
- case config.ISCSIProtocol:
- return &iscsiTarget{
- ISCSITarget: NewISCSITarget(bip, tgtConfDir),
- }
- case config.NVMEOFProtocol:
- return &nvmeofTarget{
- NvmeofTarget: NewNvmeofTarget(bip, tgtConfDir),
- }
- default:
- return nil
- }
-}
-
-type iscsiTarget struct {
- ISCSITarget
-}
-
-func (t *iscsiTarget) CreateExport(volId, path, hostIp, initiator string, chapAuth []string) (map[string]interface{}, error) {
- tgtIqn := iscsiTgtPrefix + volId
- if err := t.CreateISCSITarget(volId, tgtIqn, path, hostIp, initiator, chapAuth); err != nil {
- return nil, err
- }
- lunId := t.GetLun(path)
- conn := map[string]interface{}{
- "targetDiscovered": true,
- "targetIQN": []string{tgtIqn},
- "targetPortal": []string{t.ISCSITarget.(*tgtTarget).BindIp + ":3260"},
- "discard": false,
- "targetLun": lunId,
- }
- if len(chapAuth) == 2 {
- conn["authMethod"] = "chap"
- conn["authUserName"] = chapAuth[0]
- conn["authPassword"] = chapAuth[1]
- }
- return conn, nil
-}
-
-func (t *iscsiTarget) RemoveExport(volId, hostIp string) error {
- tgtIqn := iscsiTgtPrefix + volId
- return t.RemoveISCSITarget(volId, tgtIqn, hostIp)
-}
-
-type nvmeofTarget struct {
- NvmeofTarget
-}
-
-func (t *nvmeofTarget) CreateExport(volId, path, hostIp, initiator string, chapAuth []string) (map[string]interface{}, error) {
- tgtNqn := nvmeofTgtPrefix + volId
- // So far nvmeof transtport type is defaultly set as tcp because of its widely use, but it can also be rdma/fc.
- // The difference of transport type leads to different performance of volume attachment latency and iops.
- // This choice of transport type depends on 3 following factors:
- // 1. initiator's latency/iops requiremnet
- // 2. initiator's availiable nic(whether the inititator can use rdma/fc/tcpip)
- // 3. target server's availiable nic(whether the target server can use rdma/fc/tcpip)
- // According to the opensds architecture, it is a more approprite way for the opensds controller
- //to take the decision in the future.
- var transtype string
- transtype = "tcp"
- if err := t.CreateNvmeofTarget(volId, tgtNqn, path, initiator, transtype); err != nil {
- return nil, err
- }
- conn := map[string]interface{}{
- "targetDiscovered": true,
- "targetNQN": tgtNqn,
- "targetIP": t.NvmeofTarget.(*NvmeoftgtTarget).BindIp,
- "targetPort": "4420",
- "hostNqn": initiator,
- "discard": false,
- "transporType": transtype,
- }
-
- return conn, nil
-}
-
-func (t *nvmeofTarget) RemoveExport(volId, hostIp string) error {
- tgtNqn := nvmeofTgtPrefix + volId
- // So far nvmeof transtport type is defaultly set as tcp because of its widely use, but it can also be rdma/fc.
- // The difference of transport type leads to different performance of volume attachment latency and iops.
- // This choice of transport type depends on 3 following factors:
- // 1. initiator's latency/iops requiremnet
- // 2. initiator's availiable nic(whether the inititator can use rdma/fc/tcpip)
- // 3. target server's availiable nic(whether the target server can use rdma/fc/tcpip)
- // According to the opensds architecture, it is a more approprite way for the opensds controller
- //to take the decision in the future.
- var transtype string
- transtype = "tcp"
- return t.RemoveNvmeofTarget(volId, tgtNqn, transtype)
-}
diff --git a/contrib/drivers/lvm/testdata/lvm.yaml b/contrib/drivers/lvm/testdata/lvm.yaml
deleted file mode 100644
index c768998bc..000000000
--- a/contrib/drivers/lvm/testdata/lvm.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-tgtBindIp: 192.168.56.105
-tgtConfDir: /etc/tgt/conf.d
-enableChapAuth: true
-pool:
- vg001:
- storageType: block
- availabilityZone: default
- multiAttach: true
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- ioConnectivity:
- accessProtocol: iscsi
- maxIOPS: 7000000
- maxBWS: 600
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 5ms
diff --git a/contrib/drivers/netapp/ontap/constants.go b/contrib/drivers/netapp/ontap/constants.go
deleted file mode 100644
index 6712d0738..000000000
--- a/contrib/drivers/netapp/ontap/constants.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific l
-
-package ontap
-
-// default value for driver
-const (
- defaultConfPath = "/etc/opensds/driver/netapp_ontap_san.yaml"
- DefaultAZ = "default"
- volumePrefix = "opensds_"
- snapshotPrefix = "opensds_snapshot_"
- naaPrefix = "60a98000" //Applicable to Clustered Data ONTAP and Data ONTAP 7-Mode
- KLvPath = "lunPath"
- KLvIdFormat = "NAA"
- StorageDriverName = "ontap-san"
- driverContext = "csi"
- VolumeVersion = "1"
- SnapshotVersion = "1"
- accessMode = ""
- volumeMode = "Block"
- bytesGiB = 1073741824
-)
diff --git a/contrib/drivers/netapp/ontap/model.go b/contrib/drivers/netapp/ontap/model.go
deleted file mode 100644
index bf7e7b252..000000000
--- a/contrib/drivers/netapp/ontap/model.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ontap
-
-import (
- "github.com/netapp/trident/storage_drivers/ontap"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
-)
-
-type BackendOptions struct {
- Version int `yaml:"version"`
- StorageDriverName string `yaml:"storageDriverName"`
- ManagementLIF string `yaml:"managementLIF"`
- DataLIF string `yaml:"dataLIF"`
- Svm string `yaml:"svm"`
- IgroupName string `yaml:"igroupName"`
- Username string `yaml:"username"`
- Password string `yaml:"password"`
-}
-
-type ONTAPConfig struct {
- BackendOptions `yaml:"backendOptions"`
- Pool map[string]PoolProperties `yaml:"pool,flow"`
-}
-
-type SANDriver struct {
- sanStorageDriver *ontap.SANStorageDriver
- conf *ONTAPConfig
-}
-
-type Pool struct {
- PoolId int `json:"poolId"`
- TotalCapacity int64 `json:"totalCapacity"`
- AllocCapacity int64 `json:"allocatedCapacity"`
- UsedCapacity int64 `json:"usedCapacity"`
-}
diff --git a/contrib/drivers/netapp/ontap/ontap_san.go b/contrib/drivers/netapp/ontap/ontap_san.go
deleted file mode 100644
index 734156b4e..000000000
--- a/contrib/drivers/netapp/ontap/ontap_san.go
+++ /dev/null
@@ -1,483 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ontap
-
-import (
- "encoding/hex"
- "encoding/json"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/ghodss/yaml"
- log "github.com/golang/glog"
- uuid "github.com/satori/go.uuid"
-
- "github.com/netapp/trident/storage"
- sa "github.com/netapp/trident/storage_attribute"
- drivers "github.com/netapp/trident/storage_drivers"
- "github.com/netapp/trident/storage_drivers/ontap"
- "github.com/netapp/trident/storage_drivers/ontap/api"
- "github.com/netapp/trident/utils"
-
- odu "github.com/opensds/opensds/contrib/drivers/utils"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils/config"
-)
-
-func lunPath(name string) string {
- return fmt.Sprintf("/vol/%v/lun0", name)
-}
-
-func getVolumeName(id string) string {
- r := strings.NewReplacer("-", "")
- return volumePrefix + r.Replace(id)
-}
-
-func getSnapshotName(id string) string {
- r := strings.NewReplacer("-", "")
- return snapshotPrefix + r.Replace(id)
-}
-
-// Get LUN Serial Number
-func (d *SANDriver) getLunSerialNumber(lunPath string) (string, error) {
-
- lunSrNumber, err := d.sanStorageDriver.API.LunGetSerialNumber(lunPath)
- if err != nil {
- return "", fmt.Errorf("problem reading maps for LUN %s: %v", lunPath, err)
- }
-
- return naaPrefix + hex.EncodeToString([]byte(lunSrNumber.Result.SerialNumber())), nil
-}
-
-func (d *SANDriver) GetVolumeConfig(name string, size int64) (volConfig *storage.VolumeConfig) {
- volConfig = &storage.VolumeConfig{
- Version: VolumeVersion,
- Name: name,
- InternalName: name,
- Size: strconv.FormatInt(size*bytesGiB, 10),
- Protocol: d.sanStorageDriver.GetProtocol(),
- AccessMode: accessMode,
- VolumeMode: volumeMode,
- AccessInfo: utils.VolumeAccessInfo{},
- }
- return volConfig
-}
-
-func (d *SANDriver) GetSnapshotConfig(snapName string, volName string) (snapConfig *storage.SnapshotConfig) {
- snapConfig = &storage.SnapshotConfig{
- Version: SnapshotVersion,
- Name: snapName,
- InternalName: snapName,
- VolumeName: volName,
- VolumeInternalName: volName,
- }
- return snapConfig
-}
-
-func (d *SANDriver) Setup() error {
- // Read NetApp ONTAP config file
- d.conf = &ONTAPConfig{}
-
- p := config.CONF.OsdsDock.Backends.NetappOntapSan.ConfigPath
- if "" == p {
- p = defaultConfPath
- }
- if _, err := Parse(d.conf, p); err != nil {
- return err
- }
-
- defer func() {
- if r := recover(); r != nil {
- log.Error("unable to instantiate ontap backend.")
- }
- }()
-
- empty := ""
- config := &drivers.OntapStorageDriverConfig{
- CommonStorageDriverConfig: &drivers.CommonStorageDriverConfig{
- Version: d.conf.Version,
- StorageDriverName: StorageDriverName,
- StoragePrefixRaw: json.RawMessage("{}"),
- StoragePrefix: &empty,
- },
- ManagementLIF: d.conf.ManagementLIF,
- DataLIF: d.conf.DataLIF,
- IgroupName: d.conf.IgroupName,
- SVM: d.conf.Svm,
- Username: d.conf.Username,
- Password: d.conf.Password,
- }
- marshaledJSON, err := json.Marshal(config)
- if err != nil {
- log.Fatal("unable to marshal ONTAP config: ", err)
- }
- configJSON := string(marshaledJSON)
-
- // Convert config (JSON or YAML) to JSON
- configJSONBytes, err := yaml.YAMLToJSON([]byte(configJSON))
- if err != nil {
- err = fmt.Errorf("invalid config format: %v", err)
- return err
- }
- configJSON = string(configJSONBytes)
-
- // Parse the common config struct from JSON
- commonConfig, err := drivers.ValidateCommonSettings(configJSON)
- if err != nil {
- err = fmt.Errorf("input failed validation: %v", err)
- return err
- }
-
- d.sanStorageDriver = &ontap.SANStorageDriver{
- Config: *config,
- }
-
- // Initialize the driver.
- if err = d.sanStorageDriver.Initialize(driverContext, configJSON, commonConfig); err != nil {
- log.Errorf("could not initialize storage driver (%s). failed: %v", commonConfig.StorageDriverName, err)
- return err
- }
- log.Infof("storage driver (%s) initialized successfully.", commonConfig.StorageDriverName)
-
- return nil
-}
-
-func (d *SANDriver) Unset() error {
- //driver to clean up and stop any ongoing operations.
- d.sanStorageDriver.Terminate()
- return nil
-}
-
-func (d *SANDriver) CreateVolume(opt *pb.CreateVolumeOpts) (vol *model.VolumeSpec, err error) {
-
- if opt.GetSnapshotId() != "" {
- return d.createVolumeFromSnapshot(opt)
- }
-
- var name = getVolumeName(opt.GetId())
- volConfig := d.GetVolumeConfig(name, opt.GetSize())
-
- storagePool := &storage.Pool{
- Name: opt.GetPoolName(),
- StorageClasses: make([]string, 0),
- Attributes: make(map[string]sa.Offer),
- InternalAttributes: make(map[string]string),
- }
-
- err = d.sanStorageDriver.Create(volConfig, storagePool, make(map[string]sa.Request))
- if err != nil {
- log.Errorf("create volume (%s) failed: %v", opt.GetId(), err)
- return nil, err
- }
-
- lunPath := lunPath(name)
-
- // Get LUN Serial Number
- lunSerialNumber, err := d.getLunSerialNumber(lunPath)
- if err != nil {
- log.Errorf("create volume (%s) failed: %v", opt.GetId(), err)
- return nil, err
- }
-
- log.Infof("volume (%s) created successfully.", opt.GetId())
-
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- Identifier: &model.Identifier{DurableName: lunSerialNumber, DurableNameFormat: KLvIdFormat},
- Metadata: map[string]string{
- KLvPath: lunPath,
- },
- }, nil
-}
-
-func (d *SANDriver) createVolumeFromSnapshot(opt *pb.CreateVolumeOpts) (*model.VolumeSpec, error) {
-
- var snapName = getSnapshotName(opt.GetSnapshotId())
- var volName = opt.GetMetadata()["volume"]
- var name = getVolumeName(opt.GetId())
-
- volConfig := d.GetVolumeConfig(name, opt.GetSize())
- volConfig.CloneSourceVolumeInternal = volName
- volConfig.CloneSourceSnapshot = volName
- volConfig.CloneSourceSnapshot = snapName
-
- err := d.sanStorageDriver.CreateClone(volConfig)
- if err != nil {
- log.Errorf("create volume (%s) from snapshot (%s) failed: %v", opt.GetId(), opt.GetSnapshotId(), err)
- return nil, err
- }
-
- lunPath := lunPath(name)
-
- // Get LUN Serial Number
- lunSerialNumber, err := d.getLunSerialNumber(lunPath)
- if err != nil {
- log.Errorf("create volume (%s) from snapshot (%s) failed: %v", opt.GetId(), opt.GetSnapshotId(), err)
- return nil, err
- }
-
- log.Infof("volume (%s) created from snapshot (%s) successfully.", opt.GetId(), opt.GetSnapshotId())
-
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- Identifier: &model.Identifier{DurableName: lunSerialNumber, DurableNameFormat: KLvIdFormat},
- Metadata: map[string]string{
- KLvPath: lunPath,
- },
- }, err
-}
-
-func (d *SANDriver) PullVolume(volId string) (*model.VolumeSpec, error) {
-
- return nil, &model.NotImplementError{"method PullVolume has not been implemented yet"}
-}
-
-func (d *SANDriver) DeleteVolume(opt *pb.DeleteVolumeOpts) error {
- var name = getVolumeName(opt.GetId())
- err := d.sanStorageDriver.Destroy(name)
- if err != nil {
- msg := fmt.Sprintf("delete volume (%s) failed: %v", opt.GetId(), err)
- log.Error(msg)
- return err
- }
- log.Infof("volume (%s) deleted successfully.", opt.GetId())
- return nil
-}
-
-// ExtendVolume ...
-func (d *SANDriver) ExtendVolume(opt *pb.ExtendVolumeOpts) (*model.VolumeSpec, error) {
- var name = getVolumeName(opt.GetId())
- volConfig := d.GetVolumeConfig(name, opt.GetSize())
-
- newSize := uint64(opt.GetSize() * bytesGiB)
- if err := d.sanStorageDriver.Resize(volConfig, newSize); err != nil {
- log.Errorf("extend volume (%s) failed, error: %v", name, err)
- return nil, err
- }
-
- log.Infof("volume (%s) extended successfully.", opt.GetId())
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Size: opt.GetSize(),
- Description: opt.GetDescription(),
- Metadata: opt.GetMetadata(),
- }, nil
-}
-
-func (d *SANDriver) InitializeConnection(opt *pb.CreateVolumeAttachmentOpts) (*model.ConnectionInfo, error) {
-
- var name = getVolumeName(opt.GetVolumeId())
- hostInfo := opt.GetHostInfo()
- initiator := odu.GetInitiatorName(hostInfo.GetInitiators(), opt.GetAccessProtocol())
- hostName := hostInfo.GetHost()
-
- publishInfo := &utils.VolumePublishInfo{
- HostIQN: []string{initiator},
- HostIP: []string{hostInfo.GetIp()},
- HostName: hostName,
- }
-
- err := d.sanStorageDriver.Publish(name, publishInfo)
- if err != nil {
- msg := fmt.Sprintf("volume (%s) attachment is failed: %v", opt.GetVolumeId(), err)
- log.Errorf(msg)
- return nil, err
- }
-
- log.Infof("volume (%s) attachment is created successfully", opt.GetVolumeId())
-
- connInfo := &model.ConnectionInfo{
- DriverVolumeType: opt.GetAccessProtocol(),
- ConnectionData: map[string]interface{}{
- "target_discovered": true,
- "volumeId": opt.GetVolumeId(),
- "volume": name,
- "description": "NetApp ONTAP Attachment",
- "hostName": hostName,
- "initiator": initiator,
- "targetIQN": []string{publishInfo.IscsiTargetIQN},
- "targetPortal": []string{hostInfo.GetIp() + ":3260"},
- "targetLun": publishInfo.IscsiLunNumber,
- "igroup": publishInfo.IscsiIgroup,
- },
- }
-
- log.Infof("initialize connection successfully: %v", connInfo)
- return connInfo, nil
-}
-
-func (d *SANDriver) TerminateConnection(opt *pb.DeleteVolumeAttachmentOpts) error {
- var name = getVolumeName(opt.GetVolumeId())
-
- // Validate Flexvol exists before trying to Unmount
- volExists, err := d.sanStorageDriver.API.VolumeExists(name)
- if err != nil {
- return fmt.Errorf("error checking for existing volume (%s), error: %v", name, err)
- }
- if !volExists {
- log.Infof("volume %s already deleted, skipping destroy.", name)
- return nil
- }
-
- // Unmount the FlexVolume
- volUnmountResponse, err := d.sanStorageDriver.API.VolumeUnmount(name, true)
- if err != nil {
- return fmt.Errorf("error destroying volume %v: %v", name, err)
- }
- if zerr := api.NewZapiError(volUnmountResponse); !zerr.IsPassed() {
- return fmt.Errorf("error destroying volume %v: %v", name, zerr.Error())
- }
-
- log.Infof("termination connection successfully")
- return nil
-}
-
-func (d *SANDriver) CreateSnapshot(opt *pb.CreateVolumeSnapshotOpts) (snap *model.VolumeSnapshotSpec, err error) {
- var snapName = getSnapshotName(opt.GetId())
- var volName = getVolumeName(opt.GetVolumeId())
-
- snapConfig := d.GetSnapshotConfig(snapName, volName)
-
- snapshot, err := d.sanStorageDriver.CreateSnapshot(snapConfig)
-
- if err != nil {
- msg := fmt.Sprintf("create snapshot %s (%s) failed: %s", opt.GetName(), opt.GetId(), err)
- log.Error(msg)
- return nil, err
- }
-
- log.Infof("snapshot %s (%s) created successfully.", opt.GetName(), opt.GetId())
-
- return &model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- Name: opt.GetName(),
- Description: opt.GetDescription(),
- VolumeId: opt.GetVolumeId(),
- Size: opt.GetSize(),
- Metadata: map[string]string{
- "name": snapName,
- "volume": volName,
- "creationTime": snapshot.Created,
- "size": strconv.FormatInt(snapshot.SizeBytes/bytesGiB, 10) + "G",
- },
- }, nil
-}
-
-func (d *SANDriver) PullSnapshot(snapIdentifier string) (*model.VolumeSnapshotSpec, error) {
- // not used, do nothing
- return nil, &model.NotImplementError{"method PullSnapshot has not been implemented yet"}
-}
-
-func (d *SANDriver) DeleteSnapshot(opt *pb.DeleteVolumeSnapshotOpts) error {
-
- var snapName = getSnapshotName(opt.GetId())
- var volName = getVolumeName(opt.GetVolumeId())
-
- snapConfig := d.GetSnapshotConfig(snapName, volName)
-
- err := d.sanStorageDriver.DeleteSnapshot(snapConfig)
-
- if err != nil {
- msg := fmt.Sprintf("delete volume snapshot (%s) failed: %v", opt.GetId(), err)
- log.Error(msg)
- return err
- }
- log.Infof("volume snapshot (%s) deleted successfully", opt.GetId())
- return nil
-}
-
-func (d *SANDriver) ListPools() ([]*model.StoragePoolSpec, error) {
-
- var pools []*model.StoragePoolSpec
-
- aggregates, err := d.sanStorageDriver.API.VserverGetAggregateNames()
-
- if err != nil {
- msg := fmt.Sprintf("list pools failed: %v", err)
- log.Error(msg)
- return nil, err
- }
-
- c := d.conf
- for _, aggr := range aggregates {
- if _, ok := c.Pool[aggr]; !ok {
- continue
- }
- aggregate, _ := d.sanStorageDriver.API.AggregateCommitment(aggr)
- aggregateCapacity := aggregate.AggregateSize / bytesGiB
- aggregateAllocatedCapacity := aggregate.TotalAllocated / bytesGiB
-
- pool := &model.StoragePoolSpec{
- BaseModel: &model.BaseModel{
- Id: uuid.NewV5(uuid.NamespaceOID, aggr).String(),
- },
- Name: aggr,
- TotalCapacity: int64(aggregateCapacity),
- FreeCapacity: int64(aggregateCapacity) - int64(aggregateAllocatedCapacity),
- ConsumedCapacity: int64(aggregateAllocatedCapacity),
- StorageType: c.Pool[aggr].StorageType,
- Extras: c.Pool[aggr].Extras,
- AvailabilityZone: c.Pool[aggr].AvailabilityZone,
- }
- if pool.AvailabilityZone == "" {
- pool.AvailabilityZone = DefaultAZ
- }
- pools = append(pools, pool)
- }
-
- log.Info("list pools successfully")
- return pools, nil
-}
-
-func (d *SANDriver) InitializeSnapshotConnection(opt *pb.CreateSnapshotAttachmentOpts) (*model.ConnectionInfo, error) {
-
- return nil, &model.NotImplementError{S: "method InitializeSnapshotConnection has not been implemented yet."}
-}
-
-func (d *SANDriver) TerminateSnapshotConnection(opt *pb.DeleteSnapshotAttachmentOpts) error {
-
- return &model.NotImplementError{S: "method TerminateSnapshotConnection has not been implemented yet."}
-
-}
-
-func (d *SANDriver) CreateVolumeGroup(opt *pb.CreateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method CreateVolumeGroup has not been implemented yet"}
-}
-
-func (d *SANDriver) UpdateVolumeGroup(opt *pb.UpdateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method UpdateVolumeGroup has not been implemented yet"}
-}
-
-func (d *SANDriver) DeleteVolumeGroup(opt *pb.DeleteVolumeGroupOpts) error {
- return &model.NotImplementError{"method DeleteVolumeGroup has not been implemented yet"}
-}
diff --git a/contrib/drivers/netapp/ontap/testdata/netapp_ontap_san.yaml b/contrib/drivers/netapp/ontap/testdata/netapp_ontap_san.yaml
deleted file mode 100644
index 6c7d0751c..000000000
--- a/contrib/drivers/netapp/ontap/testdata/netapp_ontap_san.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-backendOptions:
- version: 1
- username: "admin"
- password: "password"
- storageDriverName: "ontap-san"
- managementLIF: "127.0.0.1"
- dataLIF: "127.0.0.1"
- svm: "vserver"
- igroupName: "opensds"
-pool:
- ontap-pool:
- storageType: block
- availabilityZone: default
- multiAttach: true
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- ioConnectivity:
- accessProtocol: iscsi
diff --git a/contrib/drivers/openstack/cinder/cinder.go b/contrib/drivers/openstack/cinder/cinder.go
deleted file mode 100755
index 8517f4029..000000000
--- a/contrib/drivers/openstack/cinder/cinder.go
+++ /dev/null
@@ -1,492 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements cinder driver for OpenSDS. Cinder driver will pass
-these operation requests about volume to gophercloud which is an OpenStack
-Go SDK.
-
-*/
-
-package cinder
-
-import (
- "time"
-
- log "github.com/golang/glog"
- "github.com/gophercloud/gophercloud"
- "github.com/gophercloud/gophercloud/openstack"
- "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats"
- "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions"
- "github.com/gophercloud/gophercloud/openstack/blockstorage/noauth"
- snapshotsv2 "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots"
- volumesv2 "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
- "github.com/gophercloud/gophercloud/pagination"
- "github.com/opensds/opensds/contrib/drivers/utils"
- . "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils/config"
- "github.com/opensds/opensds/pkg/utils/pwd"
- uuid "github.com/satori/go.uuid"
-)
-
-const (
- defaultConfPath = "/etc/opensds/driver/cinder.yaml"
- KCinderVolumeId = "cinderVolumeId"
- KCinderSnapId = "cinderSnapId"
-)
-
-// Driver is a struct of Cinder backend, which can be called to manage block
-// storage service defined in gophercloud.
-type Driver struct {
- // Current block storage version
- blockStoragev2 *gophercloud.ServiceClient
- blockStoragev3 *gophercloud.ServiceClient
-
- conf *CinderConfig
-}
-
-// AuthOptions
-type AuthOptions struct {
- NoAuth bool `yaml:"noAuth,omitempty"`
- CinderEndpoint string `yaml:"cinderEndpoint,omitempty"`
- IdentityEndpoint string `yaml:"endpoint,omitempty"`
- DomainID string `yaml:"domainId,omitempty"`
- DomainName string `yaml:"domainName,omitempty"`
- Username string `yaml:"username,omitempty"`
- Password string `yaml:"password,omitempty"`
- PwdEncrypter string `yaml:"PwdEncrypter,omitempty"`
- EnableEncrypted bool `yaml:"EnableEncrypted,omitempty"`
- TenantID string `yaml:"tenantId,omitempty"`
- TenantName string `yaml:"tenantName,omitempty"`
-}
-
-// CinderConfig
-type CinderConfig struct {
- AuthOptions `yaml:"authOptions"`
- Pool map[string]PoolProperties `yaml:"pool,flow"`
-}
-
-//ListPoolOpts
-type ListPoolOpts struct {
- // ID of the tenant to look up storage pools for.
- TenantID string `q:"tenant_id"`
- // Whether to list extended details.
- Detail bool `q:"detail"`
- // Volume_Type of the StoragePool
- VolumeType string `q:"volume_type"`
-}
-
-// Struct of Pools listed by volumeType
-type PoolArray struct {
- Pools []StoragePool `json:"pools"`
-}
-
-type StoragePool struct {
- Name string `json:"name"`
- Capabilities Capabilities `json:"capabilities"`
-}
-
-type Capabilities struct {
- FreeCapacityGB float64 `json:"free_capacity_gb"`
- TotalCapacityGB float64 `json:"total_capacity_gb"`
-}
-
-func (opts ListPoolOpts) ToStoragePoolsListQuery() (string, error) {
- q, err := gophercloud.BuildQueryString(opts)
- return q.String(), err
-}
-
-// Setup
-func (d *Driver) Setup() error {
- // Read cinder config file
- d.conf = &CinderConfig{}
- p := config.CONF.OsdsDock.Backends.Cinder.ConfigPath
- if "" == p {
- p = defaultConfPath
- }
- Parse(d.conf, p)
-
- var pwdCiphertext = d.conf.Password
-
- if d.conf.EnableEncrypted {
- // Decrypte the password
- pwdTool := pwd.NewPwdEncrypter(d.conf.PwdEncrypter)
- password, err := pwdTool.Decrypter(pwdCiphertext)
- if err != nil {
- return err
- }
- pwdCiphertext = password
- }
-
- opts := gophercloud.AuthOptions{
- IdentityEndpoint: d.conf.IdentityEndpoint,
- DomainID: d.conf.DomainID,
- DomainName: d.conf.DomainName,
- Username: d.conf.Username,
- Password: pwdCiphertext,
- TenantID: d.conf.TenantID,
- TenantName: d.conf.TenantName,
- }
-
- if d.conf.NoAuth {
- provider, err := noauth.NewClient(opts)
- if err != nil {
- log.Error("When get no authentication options:", err)
- return err
- }
-
- d.blockStoragev2, err = noauth.NewBlockStorageNoAuth(provider, noauth.EndpointOpts{
- CinderEndpoint: d.conf.CinderEndpoint,
- })
- if err != nil {
- log.Error("When get no authentication block storage session:", err)
- return err
- }
- } else {
- provider, err := openstack.AuthenticatedClient(opts)
- if err != nil {
- log.Error("When get auth options:", err)
- return err
- }
-
- d.blockStoragev2, err = openstack.NewBlockStorageV2(provider, gophercloud.EndpointOpts{})
- if err != nil {
- log.Error("When get block storage session:", err)
- return err
- }
- }
-
- return nil
-}
-
-// Unset
-func (d *Driver) Unset() error { return nil }
-
-// CreateVolume
-func (d *Driver) CreateVolume(req *pb.CreateVolumeOpts) (*model.VolumeSpec, error) {
- //Configure create request body.
- opts := &volumesv2.CreateOpts{
- Name: req.GetName(),
- Description: req.GetDescription(),
- Size: int(req.GetSize()),
- }
-
- vol, err := volumesv2.Create(d.blockStoragev2, opts).Extract()
- if err != nil {
- log.Error("Cannot create volume:", err)
- return nil, err
- }
-
- // Currently dock framework doesn't support sync data from storage system,
- // therefore, it's necessary to wait for the result of resource's creation.
- // Timout after 10s.
- timeout := time.After(10 * time.Second)
- ticker := time.NewTicker(300 * time.Millisecond)
- done := make(chan bool, 1)
- go func() {
- for {
- select {
- case <-ticker.C:
- tmpVol, err := d.PullVolume(vol.ID)
- if err != nil {
- continue
- }
- if tmpVol.Status != "creating" {
- vol.Status = tmpVol.Status
- close(done)
- return
- }
- case <-timeout:
- close(done)
- return
- }
-
- }
- }()
- <-done
-
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: req.GetId(),
- },
- Name: vol.Name,
- Description: vol.Description,
- Size: int64(vol.Size),
- AvailabilityZone: req.GetAvailabilityZone(),
- Status: vol.Status,
- Metadata: map[string]string{KCinderVolumeId: vol.ID},
- }, nil
-}
-
-// PullVolume
-func (d *Driver) PullVolume(volID string) (*model.VolumeSpec, error) {
- vol, err := volumesv2.Get(d.blockStoragev2, volID).Extract()
- if err != nil {
- log.Error("Cannot get volume:", err)
- return nil, err
- }
-
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: volID,
- },
- Name: vol.Name,
- Description: vol.Description,
- Size: int64(vol.Size),
- Status: vol.Status,
- }, nil
-}
-
-// DeleteVolume
-func (d *Driver) DeleteVolume(req *pb.DeleteVolumeOpts) error {
- cinderVolId := req.Metadata[KCinderVolumeId]
- if err := volumesv2.Delete(d.blockStoragev2, cinderVolId, nil).ExtractErr(); err != nil {
- log.Error("Cannot delete volume:", err)
- return err
- }
-
- return nil
-}
-
-// ExtendVolume ...
-func (d *Driver) ExtendVolume(req *pb.ExtendVolumeOpts) (*model.VolumeSpec, error) {
- //Configure create request body.
- opts := &volumeactions.ExtendSizeOpts{
- NewSize: int(req.GetSize()),
- }
- cinderVolId := req.Metadata[KCinderVolumeId]
- err := volumeactions.ExtendSize(d.blockStoragev2, cinderVolId, opts).ExtractErr()
- if err != nil {
- log.Error("Cannot extend volume:", err)
- return nil, err
- }
-
- return &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: req.GetId(),
- },
- Name: req.GetName(),
- Description: req.GetDescription(),
- Size: int64(req.GetSize()),
- AvailabilityZone: req.GetAvailabilityZone(),
- }, nil
-}
-
-// InitializeConnection
-func (d *Driver) InitializeConnection(req *pb.CreateVolumeAttachmentOpts) (*model.ConnectionInfo, error) {
- opts := &volumeactions.InitializeConnectionOpts{
- IP: req.HostInfo.GetIp(),
- Host: req.HostInfo.GetHost(),
- Initiator: utils.GetInitiatorName(req.HostInfo.Initiators, req.AccessProtocol),
- Platform: req.HostInfo.GetPlatform(),
- OSType: req.HostInfo.GetOsType(),
- Multipath: &req.MultiPath,
- }
-
- cinderVolId := req.Metadata[KCinderVolumeId]
- conn, err := volumeactions.InitializeConnection(d.blockStoragev2, cinderVolId, opts).Extract()
- if err != nil {
- log.Error("Cannot initialize volume connection:", err)
- return nil, err
- }
-
- log.Error(conn)
- data := conn["data"].(map[string]interface{})
- log.Error(data)
- connData := map[string]interface{}{
- "accessMode": data["access_mode"],
- "targetDiscovered": data["target_discovered"],
- "targetIQN": []string{data["target_iqn"].(string)},
- "targetPortal": []string{data["target_portal"].(string)},
- "discard": false,
- "targetLun": data["target_lun"],
- }
- // If auth is enabled, add auth info.
- if authMethod, ok := data["auth_method"]; ok {
- connData["authMethod"] = authMethod
- connData["authPassword"] = data["auth_password"]
- connData["authUsername"] = data["auth_username"]
- }
- return &model.ConnectionInfo{
- DriverVolumeType: conn["driver_volume_type"].(string),
- ConnectionData: connData,
- }, nil
-}
-
-// TerminateConnection
-func (d *Driver) TerminateConnection(req *pb.DeleteVolumeAttachmentOpts) error {
- opts := volumeactions.TerminateConnectionOpts{
- IP: req.HostInfo.GetIp(),
- Host: req.HostInfo.GetHost(),
- Initiator: utils.GetInitiatorName(req.HostInfo.Initiators, req.AccessProtocol),
- Platform: req.HostInfo.GetPlatform(),
- OSType: req.HostInfo.GetOsType(),
- }
- cinderVolId := req.Metadata[KCinderVolumeId]
- return volumeactions.TerminateConnection(d.blockStoragev2, cinderVolId, opts).ExtractErr()
-}
-
-// CreateSnapshot
-func (d *Driver) CreateSnapshot(req *pb.CreateVolumeSnapshotOpts) (*model.VolumeSnapshotSpec, error) {
- cinderVolId := req.Metadata[KCinderVolumeId]
- opts := &snapshotsv2.CreateOpts{
- VolumeID: cinderVolId,
- Name: req.GetName(),
- Description: req.GetDescription(),
- }
-
- snp, err := snapshotsv2.Create(d.blockStoragev2, opts).Extract()
- if err != nil {
- log.Error("Cannot create snapshot:", err)
- return nil, err
- }
-
- // Currently dock framework doesn't support sync data from storage system,
- // therefore, it's necessary to wait for the result of resource's creation.
- // Timout after 10s.
- timeout := time.After(10 * time.Second)
- ticker := time.NewTicker(300 * time.Millisecond)
- done := make(chan bool, 1)
- go func() {
- for {
- select {
- case <-ticker.C:
- tmpSnp, err := d.PullSnapshot(snp.ID)
- if err != nil {
- continue
- }
- if tmpSnp.Status != "creating" {
- snp.Status = tmpSnp.Status
- close(done)
- return
- }
- case <-timeout:
- close(done)
- return
- }
-
- }
- }()
- <-done
-
- return &model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: req.GetId(),
- },
- Name: snp.Name,
- Description: snp.Description,
- Size: int64(snp.Size),
- Status: snp.Status,
- VolumeId: req.GetVolumeId(),
- Metadata: map[string]string{KCinderSnapId: snp.ID},
- }, nil
-}
-
-// PullSnapshot
-func (d *Driver) PullSnapshot(snapID string) (*model.VolumeSnapshotSpec, error) {
- snp, err := snapshotsv2.Get(d.blockStoragev2, snapID).Extract()
- if err != nil {
- log.Error("Cannot get snapshot:", err)
- return nil, err
- }
-
- return &model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: snapID,
- },
- Name: snp.Name,
- Description: snp.Description,
- Size: int64(snp.Size),
- Status: snp.Status,
- }, nil
-}
-
-// DeleteSnapshot
-func (d *Driver) DeleteSnapshot(req *pb.DeleteVolumeSnapshotOpts) error {
- cinderSnapId := req.Metadata[KCinderSnapId]
- if err := snapshotsv2.Delete(d.blockStoragev2, cinderSnapId).ExtractErr(); err != nil {
- log.Error("Cannot delete snapshot:", err)
- return err
- }
- return nil
-}
-
-func ExtractStoragePools(p pagination.Page) ([]StoragePool, error) {
- var s struct {
- StoragePools []StoragePool `json:"pools"`
- }
- err := (p.(schedulerstats.StoragePoolPage)).ExtractInto(&s)
- return s.StoragePools, err
-}
-
-// ListPools
-func (d *Driver) ListPools() ([]*model.StoragePoolSpec, error) {
- log.Info("Starting list pools in cinder drivers.")
- opts := ListPoolOpts{Detail: true}
-
- pages, err := schedulerstats.List(d.blockStoragev2, opts).AllPages()
- if err != nil {
- log.Error("Cannot list storage pools:", err)
- return nil, err
- }
-
- polpages, err := ExtractStoragePools(pages)
- if err != nil {
- log.Error("Cannot extract storage pools:", err)
- return nil, err
- }
- var pols []*model.StoragePoolSpec
- for _, page := range polpages {
- if _, ok := d.conf.Pool[page.Name]; !ok {
- continue
- }
-
- pol := &model.StoragePoolSpec{
- BaseModel: &model.BaseModel{
- Id: uuid.NewV5(uuid.NamespaceOID, page.Name).String(),
- },
- Name: page.Name,
- TotalCapacity: int64(page.Capabilities.TotalCapacityGB),
- FreeCapacity: int64(page.Capabilities.FreeCapacityGB),
- StorageType: d.conf.Pool[page.Name].StorageType,
- AvailabilityZone: d.conf.Pool[page.Name].AvailabilityZone,
- Extras: d.conf.Pool[page.Name].Extras,
- MultiAttach: d.conf.Pool[page.Name].MultiAttach,
- }
- pols = append(pols, pol)
- }
- return pols, nil
-}
-
-func (d *Driver) InitializeSnapshotConnection(opt *pb.CreateSnapshotAttachmentOpts) (*model.ConnectionInfo, error) {
- return nil, &model.NotImplementError{S: "method InitializeSnapshotConnection has not been implemented yet"}
-}
-
-func (d *Driver) TerminateSnapshotConnection(opt *pb.DeleteSnapshotAttachmentOpts) error {
- return &model.NotImplementError{S: "method TerminateSnapshotConnection has not been implemented yet"}
-}
-
-func (d *Driver) CreateVolumeGroup(opt *pb.CreateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method CreateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) UpdateVolumeGroup(opt *pb.UpdateVolumeGroupOpts) (*model.VolumeGroupSpec, error) {
- return nil, &model.NotImplementError{"method UpdateVolumeGroup has not been implemented yet"}
-}
-
-func (d *Driver) DeleteVolumeGroup(opt *pb.DeleteVolumeGroupOpts) error {
- return &model.NotImplementError{"method DeleteVolumeGroup has not been implemented yet"}
-}
diff --git a/contrib/drivers/replication_drivers.go b/contrib/drivers/replication_drivers.go
deleted file mode 100755
index 9cc213157..000000000
--- a/contrib/drivers/replication_drivers.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module defines an standard table of storage driver. The default storage
-driver is sample driver used for testing. If you want to use other storage
-plugin, just modify Init() and Clean() method.
-
-*/
-
-package drivers
-
-import (
- "reflect"
-
- "github.com/opensds/opensds/contrib/drivers/drbd"
- "github.com/opensds/opensds/contrib/drivers/huawei/oceanstor"
- scms "github.com/opensds/opensds/contrib/drivers/scutech/cms"
- driversConfig "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils/config"
- replication_sample "github.com/opensds/opensds/testutils/driver"
-)
-
-// ReplicationDriver is an interface for exposing some operations of different
-// replication drivers, currently supporting DRBD.
-type ReplicationDriver interface {
- // Any initialization the replication driver does while starting.
- Setup() error
- // Any operation the replication driver does while stopping.
- Unset() error
-
- CreateReplication(opt *pb.CreateReplicationOpts) (*model.ReplicationSpec, error)
- DeleteReplication(opt *pb.DeleteReplicationOpts) error
- EnableReplication(opt *pb.EnableReplicationOpts) error
- DisableReplication(opt *pb.DisableReplicationOpts) error
- FailoverReplication(opt *pb.FailoverReplicationOpts) error
-}
-
-func IsSupportArrayBasedReplication(resourceType string) bool {
- v := reflect.ValueOf(config.CONF.Backends)
- t := reflect.TypeOf(config.CONF.Backends)
- for i := 0; i < t.NumField(); i++ {
- field := v.Field(i)
- tag := t.Field(i).Tag.Get("conf")
- if resourceType == tag && field.Interface().(config.BackendProperties).SupportReplication {
- return true
- }
- }
- return false
-}
-
-// Init
-func InitReplicationDriver(resourceType string) (ReplicationDriver, error) {
- var d ReplicationDriver
- switch resourceType {
- case driversConfig.DRBDDriverType:
- d = &drbd.ReplicationDriver{}
- break
- case driversConfig.HuaweiOceanStorBlockDriverType:
- d = &oceanstor.ReplicationDriver{}
- break
- case driversConfig.ScutechCMSDriverType:
- d = &scms.ReplicationDriver{}
- default:
- d = &replication_sample.ReplicationDriver{}
- break
- }
- err := d.Setup()
- return d, err
-}
-
-// Clean
-func CleanReplicationDriver(d ReplicationDriver) ReplicationDriver {
- // Execute different clean operations according to the ReplicationDriver type.
- switch d.(type) {
- case *drbd.ReplicationDriver:
- break
- case *oceanstor.ReplicationDriver:
- d = &oceanstor.ReplicationDriver{}
- default:
- break
- }
- d.Unset()
- d = nil
-
- return d
-}
diff --git a/contrib/drivers/scutech/cms/replication.go b/contrib/drivers/scutech/cms/replication.go
deleted file mode 100755
index 3bcb4cf9f..000000000
--- a/contrib/drivers/scutech/cms/replication.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package scms
-
-import (
- "path/filepath"
-
- "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
-)
-
-// Replication driver
-type ReplicationDriver struct{}
-
-// Setup
-func (r *ReplicationDriver) Setup() error { return nil }
-
-// Unset
-func (r *ReplicationDriver) Unset() error { return nil }
-
-// Create and start replication
-func (r *ReplicationDriver) CreateReplication(opt *pb.CreateReplicationOpts) (*model.ReplicationSpec, error) {
- glog.Infoln("CMS create migration task ...")
-
- replica := &model.ReplicationSpec{
- // TODO: return additional important information
- PrimaryReplicationDriverData: make(map[string]string),
- SecondaryReplicationDriverData: make(map[string]string),
- }
-
- isPrimary := opt.GetIsPrimary()
- if !isPrimary {
- // on CMSProxy, do nothing
- return replica, nil
- }
-
- bandwidth := opt.GetReplicationBandwidth()
- primaryData := opt.GetPrimaryReplicationDriverData()
- secondaryData := opt.GetSecondaryReplicationDriverData()
- primaryVolId := opt.GetPrimaryVolumeId()
- secondaryVolId := opt.GetSecondaryVolumeId()
- path, _ := filepath.EvalSymlinks(primaryData["Mountpoint"])
- primaryBackingDevice, _ := filepath.Abs(path)
-
- path, _ = secondaryData["Mountpoint"]
- secondaryBackingDevice, _ := filepath.Abs(path)
- glog.Infof("%s:%s\n", primaryBackingDevice, secondaryBackingDevice)
- sourceVol := CmsVolume{VolumeId: primaryVolId, VolumeName: primaryBackingDevice}
- targetVol := CmsVolume{VolumeId: secondaryVolId, VolumeName: secondaryBackingDevice}
-
- task := NewCmsTask(bandwidth, false)
- if err := task.AddVolume(sourceVol, targetVol); err != nil {
- return nil, err
- }
-
- cmsadm := NewCmsAdm()
- if _, err := cmsadm.CreateTask(task); err != nil {
- return nil, err
- }
-
- if _, err := cmsadm.Up(); err != nil {
- return nil, err
- }
-
- return replica, nil
-}
-
-// Delete replication
-func (r *ReplicationDriver) DeleteReplication(opt *pb.DeleteReplicationOpts) error {
- glog.Infoln("CMS delete migration task ...")
-
- isPrimary := opt.GetIsPrimary()
- if !isPrimary {
- return nil
- }
-
- cmsadm := NewCmsAdm()
- _, err := cmsadm.DeleteTask()
- return err
-}
-
-// Start replication
-func (r *ReplicationDriver) EnableReplication(opt *pb.EnableReplicationOpts) error {
- glog.Infoln("CMS start migration task ....")
-
- isPrimary := opt.GetIsPrimary()
- if !isPrimary {
- return nil
- }
-
- cmsadm := NewCmsAdm()
- _, err := cmsadm.Up()
- return err
-}
-
-// Stop replication
-func (r *ReplicationDriver) DisableReplication(opt *pb.DisableReplicationOpts) error {
- glog.Infoln("CMS stop migration task")
-
- isPrimary := opt.GetIsPrimary()
- if !isPrimary {
- return nil
- }
-
- cmsadm := NewCmsAdm()
-
- _, err := cmsadm.Down()
- return err
-}
-
-// Failover Replication
-func (r *ReplicationDriver) FailoverReplication(opt *pb.FailoverReplicationOpts) error {
- glog.Infoln("CMS failover ....")
- // Nothing to do here. Failover is executed automatically by CMS plugin.
- return nil
-}
diff --git a/contrib/drivers/scutech/cms/scmstask.go b/contrib/drivers/scutech/cms/scmstask.go
deleted file mode 100755
index 6c76f0238..000000000
--- a/contrib/drivers/scutech/cms/scmstask.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package scms
-
-import (
- "fmt"
-)
-
-type CmsVolume struct {
- VolumeId string
- VolumeName string
-}
-
-type CmsTask struct {
- bandwidth int64
- cdpFlag bool
- taskVolumes map[string]string
- volumeList map[string]CmsVolume
-}
-
-func NewCmsTask(bandwidth int64, cdpFlag bool) *CmsTask {
- return &CmsTask{
- bandwidth: bandwidth,
- cdpFlag: cdpFlag,
- taskVolumes: make(map[string]string),
- volumeList: make(map[string]CmsVolume),
- }
-}
-
-func checkVolume(c *CmsTask, volumeId string) bool {
- _, find := c.volumeList[volumeId]
-
- return find
-}
-
-func (t *CmsTask) AddVolume(source CmsVolume, target CmsVolume) error {
- if findSource := checkVolume(t, source.VolumeId); findSource {
- return fmt.Errorf("source volume[%s] already exists", source.VolumeId)
- }
-
- if findTarget := checkVolume(t, target.VolumeId); findTarget {
- return fmt.Errorf("target volume[%s] already exists", target.VolumeId)
- }
-
- t.taskVolumes[source.VolumeId] = target.VolumeId
- t.volumeList[source.VolumeId] = source
- t.volumeList[target.VolumeId] = target
-
- return nil
-}
diff --git a/contrib/drivers/scutech/cms/scmsutils.go b/contrib/drivers/scutech/cms/scmsutils.go
deleted file mode 100755
index 41f97ca6a..000000000
--- a/contrib/drivers/scutech/cms/scmsutils.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package scms
-
-import (
- "errors"
- "os/exec"
- "strconv"
-)
-
-const CMS_ADM = "/opt/cmsagent/cmsadm"
-
-const (
- CMS_CREATE = "--create"
- CMS_DELETE = "--remove"
- CMS_START = "--up"
- CMS_STOP = "--down"
- CMS_QUERY = "--query"
-)
-
-type CmsAdm struct {
-}
-
-func NewCmsAdm() *CmsAdm {
- return &CmsAdm{}
-}
-
-func (c *CmsAdm) CreateTask(t *CmsTask, arg ...string) ([]byte, error) {
- var argv = []string{CMS_CREATE}
-
- var option string
- option = "-b " + strconv.FormatInt(t.bandwidth, 10)
- argv = append(argv, option)
-
- if t.cdpFlag {
- option = "-j"
- argv = append(argv, option)
- }
-
- for svolId := range t.taskVolumes {
- tvolId := t.taskVolumes[svolId]
- svol := t.volumeList[svolId]
- tvol := t.volumeList[tvolId]
-
- option = ("-D " + svol.VolumeName + "," + tvol.VolumeName)
- argv = append(argv, option)
- }
-
- return cmdExec(CMS_ADM, argv)
-}
-
-func (c *CmsAdm) DeleteTask(arg ...string) ([]byte, error) {
- var argv = []string{CMS_DELETE}
- return cmdExec(CMS_ADM, argv)
-}
-
-func (c *CmsAdm) Up() ([]byte, error) {
- var argv = []string{CMS_START}
- return cmdExec(CMS_ADM, argv)
-}
-
-func (c *CmsAdm) Down() ([]byte, error) {
- var argv = []string{CMS_STOP}
- return cmdExec(CMS_ADM, argv)
-}
-
-func (c *CmsAdm) Query() ([]byte, error) {
- var argv = []string{CMS_QUERY}
- return cmdExec(CMS_ADM, argv)
-}
-
-func cmdExec(cmd string, argv []string) ([]byte, error) {
- out, err := exec.Command(cmd, argv[0:]...).Output()
- if err != nil {
- err = errors.New(string(out))
- }
- return out, err
-}
diff --git a/contrib/drivers/utils/utils.go b/contrib/drivers/utils/utils.go
deleted file mode 100644
index a175f9458..000000000
--- a/contrib/drivers/utils/utils.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package utils
-
-import (
- pb "github.com/opensds/opensds/pkg/model/proto"
-)
-
-// GetInitiatorName returns initiator name by protocol
-func GetInitiatorName(initiators []*pb.Initiator, protocol string) string {
- for _, initiator := range initiators {
- if initiator.Protocol == protocol {
- return initiator.PortName
- }
- }
- return ""
-}
-
-func GetInitiatorsByProtocol(initiators []*pb.Initiator, protocol string) []string {
- var protocolInitiators []string
-
- for _, initiator := range initiators {
- if initiator.Protocol == protocol {
- protocolInitiators = append(protocolInitiators, initiator.PortName)
- }
- }
-
- return protocolInitiators
-}
diff --git a/contrib/drivers/utils/utils_test.go b/contrib/drivers/utils/utils_test.go
deleted file mode 100644
index f4eb169e4..000000000
--- a/contrib/drivers/utils/utils_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package utils
-
-import (
- "testing"
-
- pb "github.com/opensds/opensds/pkg/model/proto"
-)
-
-func TestGetInitiatorName(t *testing.T) {
-
- fakeInitiators := []*pb.Initiator{
- &pb.Initiator{
- PortName: "fake1",
- Protocol: "iscsi",
- },
- &pb.Initiator{
- PortName: "fake2",
- Protocol: "fibre_channel",
- },
- }
-
- testCases := []struct {
- initiators []*pb.Initiator
- protocol string
- expected string
- }{
- {
- initiators: fakeInitiators,
- protocol: "iscsi",
- expected: "fake1",
- },
- {
- initiators: fakeInitiators,
- protocol: "fibre_channel",
- expected: "fake2",
- },
- {
- initiators: fakeInitiators,
- protocol: "fake_protocol",
- expected: "",
- },
- }
-
- for _, c := range testCases {
- actual := GetInitiatorName(c.initiators, c.protocol)
- if actual != c.expected {
- t.Errorf("Expected %v, get %v", c.expected, actual)
- }
- }
-
-}
diff --git a/contrib/exporters/lvm_exporter/lvm_exporter.go b/contrib/exporters/lvm_exporter/lvm_exporter.go
deleted file mode 100644
index 33e20318f..000000000
--- a/contrib/exporters/lvm_exporter/lvm_exporter.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-package main
-
-import (
- "fmt"
- "net/http"
- "os"
- "strconv"
- "sync"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/contrib/drivers/lvm"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promhttp"
-)
-
-const DefaultPort = "8080"
-
-// struct for lvm collector that contains pointers
-// to prometheus descriptors for each metric we expose.
-type lvmCollector struct {
- mu sync.Mutex
- //volume metrics
- VolumeIOPS *prometheus.Desc
- VolumeReadThroughput *prometheus.Desc
- VolumeWriteThroughput *prometheus.Desc
- VolumeResponseTime *prometheus.Desc
- VolumeServiceTime *prometheus.Desc
- VolumeUtilization *prometheus.Desc
- //Disk metrics
- DiskIOPS *prometheus.Desc
- DiskReadThroughput *prometheus.Desc
- DiskWriteThroughput *prometheus.Desc
- DiskResponseTime *prometheus.Desc
- DiskServiceTime *prometheus.Desc
- DiskUtilization *prometheus.Desc
-}
-
-// constructor for lvm collector that
-// initializes every descriptor and returns a pointer to the collector
-func newLvmCollector() *lvmCollector {
- var labelKeys = []string{"device"}
-
- return &lvmCollector{
- VolumeIOPS: prometheus.NewDesc("lvm_volume_iops_tps",
- "Shows IOPS",
- labelKeys, nil,
- ),
- VolumeReadThroughput: prometheus.NewDesc("lvm_volume_read_throughput_kbs",
- "Shows ReadThroughput",
- labelKeys, nil,
- ),
- VolumeWriteThroughput: prometheus.NewDesc("lvm_volume_write_throughput_kbs",
- "Shows ReadThroughput",
- labelKeys, nil,
- ),
- VolumeResponseTime: prometheus.NewDesc("lvm_volume_response_time_ms",
- "Shows ReadThroughput",
- labelKeys, nil,
- ),
- VolumeServiceTime: prometheus.NewDesc("lvm_volume_service_time_ms",
- "Shows ServiceTime",
- labelKeys, nil,
- ),
- VolumeUtilization: prometheus.NewDesc("lvm_volume_utilization_prcnt",
- "Shows Utilization in percentage",
- labelKeys, nil,
- ),
- DiskIOPS: prometheus.NewDesc("lvm_disk_iops_tps",
- "Shows IOPS",
- labelKeys, nil,
- ),
- DiskReadThroughput: prometheus.NewDesc("lvm_disk_read_throughput_kbs",
- "Shows Disk ReadThroughput",
- labelKeys, nil,
- ),
- DiskWriteThroughput: prometheus.NewDesc("lvm_disk_write_throughput_kbs",
- "Shows Write Throughput",
- labelKeys, nil,
- ),
- DiskResponseTime: prometheus.NewDesc("lvm_disk_response_time_ms",
- "Shows Disk Response Time",
- labelKeys, nil,
- ),
- DiskServiceTime: prometheus.NewDesc("lvm_disk_service_time_ms",
- "Shows ServiceTime",
- labelKeys, nil,
- ),
- DiskUtilization: prometheus.NewDesc("lvm_disk_utilization_prcnt",
- "Shows Utilization in percentage",
- labelKeys, nil,
- ),
- }
-
-}
-
-// Describe function.
-// It essentially writes all descriptors to the prometheus desc channel.
-func (c *lvmCollector) Describe(ch chan<- *prometheus.Desc) {
-
- //Update this section with the each metric you create for a given collector
- ch <- c.VolumeIOPS
- ch <- c.VolumeReadThroughput
- ch <- c.VolumeWriteThroughput
- ch <- c.VolumeResponseTime
- ch <- c.VolumeServiceTime
- ch <- c.VolumeUtilization
- ch <- c.DiskIOPS
- ch <- c.DiskReadThroughput
- ch <- c.DiskWriteThroughput
- ch <- c.DiskResponseTime
- ch <- c.DiskServiceTime
- ch <- c.DiskUtilization
-}
-
-type Config struct {
- Type string `type`
- Devices []string `devices`
-}
-
-type Configs struct {
- Cfgs []*Config `resources`
-}
-
-// Collect implements required collect function for all promehteus collectors
-func (c *lvmCollector) Collect(ch chan<- prometheus.Metric) {
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- metricDriver := lvm.MetricDriver{}
- metricDriver.Setup()
-
- metricArray, _ := metricDriver.CollectMetrics()
- for _, metric := range metricArray {
- lableVals := []string{metric.InstanceName}
- switch metric.Component {
- case "volume":
- switch metric.Name {
- case "iops":
- ch <- prometheus.MustNewConstMetric(c.VolumeIOPS, prometheus.GaugeValue, metric.MetricValues[0].Value, lableVals...)
- case "read_throughput":
- ch <- prometheus.MustNewConstMetric(c.VolumeReadThroughput, prometheus.GaugeValue, metric.MetricValues[0].Value, lableVals...)
- case "write_throughput":
- ch <- prometheus.MustNewConstMetric(c.VolumeWriteThroughput, prometheus.GaugeValue, metric.MetricValues[0].Value, lableVals...)
- case "response_time":
- ch <- prometheus.MustNewConstMetric(c.VolumeResponseTime, prometheus.GaugeValue, metric.MetricValues[0].Value, lableVals...)
- case "service_time":
- ch <- prometheus.MustNewConstMetric(c.VolumeServiceTime, prometheus.GaugeValue, metric.MetricValues[0].Value, lableVals...)
- case "utilization_prcnt":
- ch <- prometheus.MustNewConstMetric(c.VolumeUtilization, prometheus.GaugeValue, metric.MetricValues[0].Value, lableVals...)
-
- }
- case "disk":
- switch metric.Name {
- case "iops":
- ch <- prometheus.MustNewConstMetric(c.DiskIOPS, prometheus.GaugeValue, metric.MetricValues[0].Value, lableVals...)
- case "read_throughput":
- ch <- prometheus.MustNewConstMetric(c.DiskReadThroughput, prometheus.GaugeValue, metric.MetricValues[0].Value, lableVals...)
- case "write_throughput":
- ch <- prometheus.MustNewConstMetric(c.DiskWriteThroughput, prometheus.GaugeValue, metric.MetricValues[0].Value, lableVals...)
- case "response_time":
- ch <- prometheus.MustNewConstMetric(c.DiskResponseTime, prometheus.GaugeValue, metric.MetricValues[0].Value, lableVals...)
- case "service_time":
- ch <- prometheus.MustNewConstMetric(c.DiskServiceTime, prometheus.GaugeValue, metric.MetricValues[0].Value, lableVals...)
- case "utilization_prcnt":
- ch <- prometheus.MustNewConstMetric(c.DiskUtilization, prometheus.GaugeValue, metric.MetricValues[0].Value, lableVals...)
-
- }
- }
- }
-}
-
-func validateCliArg(arg1 string) string {
- num, err := strconv.Atoi(arg1)
- if (err != nil) || (num > 65535) {
-
- fmt.Println("please enter a valid port number")
- os.Exit(1)
- }
- return arg1
-}
-
-// main function for lvm exporter
-// lvm exporter is a independent process which user can start if required
-func main() {
-
- var portNo string
- if len(os.Args) > 1 {
- portNo = validateCliArg(os.Args[1])
- } else {
- portNo = DefaultPort
- }
-
- //Create a new instance of the lvmcollector and
- //register it with the prometheus client.
- lvm := newLvmCollector()
- prometheus.MustRegister(lvm)
-
- //This section will start the HTTP server and expose
- //any metrics on the /metrics endpoint.
- http.Handle("/metrics", promhttp.Handler())
- log.Info("lvm exporter begining to serve on port :" + portNo)
- log.Fatal(http.ListenAndServe(":"+portNo, nil))
-}
diff --git a/docs/design/opensds_provisioner.md b/docs/design/opensds_provisioner.md
index 747a277b9..b272f67c1 100644
--- a/docs/design/opensds_provisioner.md
+++ b/docs/design/opensds_provisioner.md
@@ -174,4 +174,4 @@ References
https://github.com/kubernetes-incubator/external-storage
-https://github.com/opensds/opensds-api-specs/blob/master/northbound-api/v1/openapi-spec/swagger.yaml
\ No newline at end of file
+https://github.com/sodafoundation/controller-api-specs/blob/master/northbound-api/v1/openapi-spec/swagger.yaml
\ No newline at end of file
diff --git a/docs/readthedocs/contributing/Community-Contributing.md b/docs/readthedocs/contributing/Community-Contributing.md
index c837db809..cb8b33ff2 100644
--- a/docs/readthedocs/contributing/Community-Contributing.md
+++ b/docs/readthedocs/contributing/Community-Contributing.md
@@ -1,9 +1,9 @@
# Community-Contributing
## OpenSDS
-[![Go Report Card](https://goreportcard.com/badge/github.com/opensds/opensds?branch=master)](https://goreportcard.com/report/github.com/opensds/opensds)
-[![Build Status](https://travis-ci.org/opensds/opensds.svg?branch=master)](https://travis-ci.org/opensds/opensds)
-[![Coverage Status](https://coveralls.io/repos/github/opensds/opensds/badge.svg?branch=master)](https://coveralls.io/github/opensds/opensds?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/sodafoundation/controller?branch=master)](https://goreportcard.com/report/github.com/sodafoundation/controller)
+[![Build Status](https://travis-ci.org/sodafoundation/controller.svg?branch=master)](https://travis-ci.org/sodafoundation/controller)
+[![Coverage Status](https://coveralls.io/repos/github/sodafoundation/controller/badge.svg?branch=master)](https://coveralls.io/github/sodafoundation/controller?branch=master)
@@ -50,10 +50,10 @@ When reporting issues, refer to this format:
### Propose PRs
-- Raise your idea as an [issue](https://github.com/opensds/opensds/issues)
+- Raise your idea as an [issue](https://github.com/sodafoundation/controller/issues)
- If it is a new feature that needs lots of design details, a design proposal should also be submitted [here](https://github.com/opensds/design-specs/pulls).
- After reaching consensus in the issue discussions and design proposal reviews, complete the development on the forked repo and submit a PR.
- Here are the [PRs](https://github.com/opensds/opensds/pulls?q=is%3Apr+is%3Aclosed) that are already closed.
+ Here are the [PRs](https://github.com/sodafoundation/controller/pulls?q=is%3Apr+is%3Aclosed) that are already closed.
- If a PR is submitted by one of the core members, it has to be merged by a different core member.
- After PR is sufficiently discussed, it will get merged, abondoned or rejected depending on the outcome of the discussion.
diff --git a/docs/readthedocs/contributing/How-to-Develop-OpenSDS-Native-Storage-Driver.md b/docs/readthedocs/contributing/How-to-Develop-OpenSDS-Native-Storage-Driver.md
index e45418c64..d915c0204 100644
--- a/docs/readthedocs/contributing/How-to-Develop-OpenSDS-Native-Storage-Driver.md
+++ b/docs/readthedocs/contributing/How-to-Develop-OpenSDS-Native-Storage-Driver.md
@@ -1,3 +1,3 @@
# How to Develop an OpenSDS Native Storage Driver
-To learn how to develop a native storage driver in OpenSDS, see the document [here](https://github.com/opensds/opensds/wiki/Develop-new-storage-driver-for-OpenSDS).
+To learn how to develop a native storage driver in OpenSDS, see the document [here](https://github.com/sodafoundation/controller/wiki/Develop-new-storage-driver-for-OpenSDS).
diff --git a/docs/readthedocs/contributing/Tutorials-for-Beginners.md b/docs/readthedocs/contributing/Tutorials-for-Beginners.md
index ba5232ab1..261f303be 100644
--- a/docs/readthedocs/contributing/Tutorials-for-Beginners.md
+++ b/docs/readthedocs/contributing/Tutorials-for-Beginners.md
@@ -20,7 +20,7 @@ git clone https://your_repo_url/opensds.git
To reduce the conflicts between your remote repo and opensds repo, we SUGGEST you configure opensds as the upstream repo:
```
-git remote add upstream https://github.com/opensds/opensds.git
+git remote add upstream https://github.com/sodafoundation/controller.git
git fetch upstream
```
diff --git a/docs/readthedocs/installation/Local-Cluster-Installation.md b/docs/readthedocs/installation/Local-Cluster-Installation.md
index 45b99681f..0645a24c7 100644
--- a/docs/readthedocs/installation/Local-Cluster-Installation.md
+++ b/docs/readthedocs/installation/Local-Cluster-Installation.md
@@ -1,17 +1,17 @@
# Local-Cluster-Installation
-Here is a tutorial guiding users and new contributors to get familiar with [OpenSDS](https://github.com/opensds/opensds) by installing a simple local cluster.
+Here is a tutorial guiding users and new contributors to get familiar with [OpenSDS](https://github.com/sodafoundation/controller) by installing a simple local cluster.
If you are an end-user who are interested in this project, some links for installation and testing are as follows:
-- [OpenSDS Local Cluster Installation through Ansible On Ubuntu](https://github.com/opensds/opensds/wiki/OpenSDS-Cluster-Installation-through-Ansible) **(Recommended)**
-- [OpenSDS Local Cluster Installation On Red Hat Enterprise Linux](https://github.com/opensds/opensds/wiki/OpenSDS-Cluster-Installation-On-Red-Hat-Enterprise-Linux)
+- [OpenSDS Local Cluster Installation through Ansible On Ubuntu](https://github.com/sodafoundation/controller/wiki/OpenSDS-Cluster-Installation-through-Ansible) **(Recommended)**
+- [OpenSDS Local Cluster Installation On Red Hat Enterprise Linux](https://github.com/sodafoundation/controller/wiki/OpenSDS-Cluster-Installation-On-Red-Hat-Enterprise-Linux)
If you are a code developer and wish to contribute in this project, here are some links for quickly installing OpenSDS
Hotpot project:
-- [OpenSDS Local Cluster Installation](https://github.com/opensds/opensds/wiki/OpenSDS-Local-Cluster-with-Multi-tenants-Installation) **(Recommended)**
-- [Run Containerized OpenSDS Service for Testing](https://github.com/opensds/opensds/wiki/How-to-Run-Containerized-OpenSDS-for-Testing-Work)
+- [OpenSDS Local Cluster Installation](https://github.com/sodafoundation/controller/wiki/OpenSDS-Local-Cluster-with-Multi-tenants-Installation) **(Recommended)**
+- [Run Containerized OpenSDS Service for Testing](https://github.com/sodafoundation/controller/wiki/How-to-Run-Containerized-OpenSDS-for-Testing-Work)
If you want to deploy and test opensds integrated with Kubernetes scenario, please refer to:
-- [OpenSDS Integration with Kubernetes CSI](https://github.com/opensds/opensds/wiki/OpenSDS-Integration-with-Kubernetes-CSI) **(Recommended)**
-- [OpenSDS Integration with Kubernetes Flexvolume](https://github.com/opensds/opensds/wiki/OpenSDS-Integration-with-Kubernetes-Flexvolume) **(Deprecated)**
-- [OpenSDS Installation with Kubernetes Service Catalog](https://github.com/opensds/opensds/wiki/OpenSDS-Installation-with-Kubernetes-Service-Catalog) **(Alpha)**
+- [OpenSDS Integration with Kubernetes CSI](https://github.com/sodafoundation/controller/wiki/OpenSDS-Integration-with-Kubernetes-CSI) **(Recommended)**
+- [OpenSDS Integration with Kubernetes Flexvolume](https://github.com/sodafoundation/controller/wiki/OpenSDS-Integration-with-Kubernetes-Flexvolume) **(Deprecated)**
+- [OpenSDS Installation with Kubernetes Service Catalog](https://github.com/sodafoundation/controller/wiki/OpenSDS-Installation-with-Kubernetes-Service-Catalog) **(Alpha)**
diff --git a/docs/readthedocs/installation/OpenSDS Bali Install Guide b/docs/readthedocs/installation/OpenSDS Bali Install Guide
index f0ed25a4a..d8ff93b76 100644
--- a/docs/readthedocs/installation/OpenSDS Bali Install Guide
+++ b/docs/readthedocs/installation/OpenSDS Bali Install Guide
@@ -1 +1 @@
-Here is a guide on [how to install OpenSDS Bali release](https://github.com/opensds/opensds/blob/master/docs/readthedocs/installation/OpenSDS%20Bali%20POC%20Deployment.pdf).
+Here is a guide on [how to install OpenSDS Bali release](https://github.com/sodafoundation/controller/blob/master/docs/readthedocs/installation/OpenSDS%20Bali%20POC%20Deployment.pdf).
diff --git a/docs/readthedocs/introduction/opensds-installer.md b/docs/readthedocs/introduction/opensds-installer.md
index c7b022f34..f67799379 100644
--- a/docs/readthedocs/introduction/opensds-installer.md
+++ b/docs/readthedocs/introduction/opensds-installer.md
@@ -1,6 +1,6 @@
# OpenSDS Installer
For detailed information about this project, please refer to the
-[repo](https://github.com/opensds/opensds-installer).
+[repo](https://github.com/sodafoundation/controller-installer).
## Introduction
This project is designed for locating the code for installing all required
@@ -23,4 +23,4 @@ charts folder for installing and configuring OpenSDS cluster through helm tool.
* Mailing list: [opensds-tech-discuss](https://lists.opensds.io/mailman/listinfo/opensds-tech-discuss)
* slack: #[opensds](https://opensds.slack.com)
-* Ideas/Bugs: [issues](https://github.com/opensds/opensds-installer/issues)
+* Ideas/Bugs: [issues](https://github.com/sodafoundation/controller-installer/issues)
diff --git a/docs/readthedocs/introduction/opensds.md b/docs/readthedocs/introduction/opensds.md
index 8f5ec3f34..1abbd7dec 100644
--- a/docs/readthedocs/introduction/opensds.md
+++ b/docs/readthedocs/introduction/opensds.md
@@ -28,12 +28,12 @@ now, please refer to the Contributing sections below.
* Mailing list: [opensds-tech-discuss](https://lists.opensds.io/mailman/listinfo/opensds-tech-discuss)
* slack: #[opensds](https://opensds.slack.com)
-* Ideas/Bugs: [issues](https://github.com/opensds/opensds/issues)
+* Ideas/Bugs: [issues](https://github.com/sodafoundation/controller/issues)
## REST API
### OpenAPI
-* [Bali](http://petstore.swagger.io/?url=https://raw.githubusercontent.com/opensds/opensds/v0.4.0/openapi-spec/swagger.yaml)
-* [Aruba](http://petstore.swagger.io/?url=https://raw.githubusercontent.com/opensds/opensds/v0.2.0/openapi-spec/swagger.yaml)
-* [Zealand](http://petstore.swagger.io/?url=https://raw.githubusercontent.com/opensds/opensds/v0.1.0/openapi-spec/swagger.yaml)
+* [Bali](http://petstore.swagger.io/?url=https://raw.githubusercontent.com/sodafoundation/controller/v0.4.0/openapi-spec/swagger.yaml)
+* [Aruba](http://petstore.swagger.io/?url=https://raw.githubusercontent.com/sodafoundation/controller/v0.2.0/openapi-spec/swagger.yaml)
+* [Zealand](http://petstore.swagger.io/?url=https://raw.githubusercontent.com/sodafoundation/controller/v0.1.0/openapi-spec/swagger.yaml)
diff --git a/docs/readthedocs/releases.rst b/docs/readthedocs/releases.rst
index afbbf1088..250d315de 100644
--- a/docs/readthedocs/releases.rst
+++ b/docs/readthedocs/releases.rst
@@ -31,11 +31,11 @@ The Bali release adds the following functionality:
The OpenSDS controller (Hotpot), the north bound plugins (Sushi), the multiple cloud(Gelato),
the opensds dashboard and the installer can be downloaded from here:
-`Hotpot `__
+`Hotpot `__
`Sushi `__
`Gelato `__
-`Dashboard `__
-`Installer `__
+`Dashboard `__
+`Installer `__
Aruba
@@ -66,6 +66,6 @@ The Aruba release adds the following functionality:
The OpenSDS controller (Hotpot), the north bound plugins (Sushi), and the
installer can be downloaded from here:
-`Hotpot `__
+`Hotpot `__
`Sushi `__
-`Installer `__
+`Installer `__
diff --git a/examples/driver/ceph.yaml b/examples/driver/ceph.yaml
deleted file mode 100755
index 6908cc220..000000000
--- a/examples/driver/ceph.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2018 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-configFile: /etc/ceph/ceph.conf
-pool:
- rbd:
- storageType: block
- availabilityZone: default
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: true
- deduplication: true
- ioConnectivity:
- accessProtocol: rbd
- maxIOPS: 6000000
- maxBWS: 500
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 5ms
diff --git a/examples/driver/chubaofs.yaml b/examples/driver/chubaofs.yaml
deleted file mode 100644
index d51c2eb1d..000000000
--- a/examples/driver/chubaofs.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-clusterInfo:
- name: chubaofs-test
- masterAddr:
- - 192.168.0.100:80
- - 192.168.0.101:80
- - 192.168.0.102:80
- volumeCapLimit: 100000
-
-runtimeEnv:
- mntPoint: /mnt/chubaofs
- clientPath: /var/chubaofs
-
-pool:
- opensds-files-chubaofs:
- diskType: NL-SAS
- availabilityZone: default
- multiAttach: true
- storageType: file
- extras:
- dataStorage:
- storageAccessCapability:
- - Read
- - Write
- - Execute
- ioConnectivity:
- accessProtocol: NFS
-
diff --git a/examples/driver/cinder.yaml b/examples/driver/cinder.yaml
deleted file mode 100755
index 8cdf87bfd..000000000
--- a/examples/driver/cinder.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2018 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-authOptions:
- endpoint: "http://192.168.56.104/identity"
- domainName: "Default"
- username: "admin"
- password: "admin"
- # Whether to encrypt the password. If enabled, the value of the password must be ciphertext.
- EnableEncrypted: false
- # Encryption and decryption tool. Default value is aes. The decryption tool can only decrypt the corresponding ciphertext.
- PwdEncrypter: "aes"
- tenantName: "admin"
-pool:
- pool1:
- storageType: block
- availabilityZone: nova-01
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- ioConnectivity:
- accessProtocol: iscsi
- maxIOPS: 7000000
- maxBWS: 600
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 3ms
- pool2:
- storageType: block
- availabilityZone: nova-01
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- ioConnectivity:
- accessProtocol: iscsi
- maxIOPS: 3000000
- maxBWS: 300
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 500ms
diff --git a/examples/driver/drbd.yaml b/examples/driver/drbd.yaml
deleted file mode 100644
index 3cdaf4eb8..000000000
--- a/examples/driver/drbd.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# Minumum and Maximum TCP/IP ports used for DRBD replication.
-PortMin: 7000
-PortMax: 8000
-# Exaclty two hosts between resources are replicated.
-# Never ever change the Node-ID associated with a Host(name)
-Hosts:
- - Hostname: rckdeba
- IP: 10.43.70.115
- Node-ID: 0
-
- - Hostname: rckdebb
- IP: 10.43.70.116
- Node-ID: 1
diff --git a/examples/driver/hpe_nimble.yaml b/examples/driver/hpe_nimble.yaml
deleted file mode 100644
index 4464ca4b8..000000000
--- a/examples/driver/hpe_nimble.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-authOptions:
- endpoints: https://172.16.0.250:5392/v1
- username: admin
- password: admin
- insecure: true
-
-
-pool:
- test01:
- storageType: block
- availabilityZone: vnimble01
- extras:
- ioConnectivity:
- accessProtocol: fibre_channel
- default:
- storageType: block
- availabilityZone: vnimble01
- extras:
- ioConnectivity:
- accessProtocol: iscsi
diff --git a/examples/driver/huawei_fusionstorage.yaml b/examples/driver/huawei_fusionstorage.yaml
deleted file mode 100644
index 142296b5b..000000000
--- a/examples/driver/huawei_fusionstorage.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2018 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-authOptions:
- fmIp: 192.168.0.100
- fsaIp:
- - 192.168.0.1
- - 192.168.0.2
- - 192.168.0.3
-
-pool:
- 0:
- storageType: block
- availabilityZone: nova-01
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- ioConnectivity:
- accessProtocol: iscsi
- maxIOPS: 7000000
- maxBWS: 600
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 3ms
- 1:
- storageType: block
- availabilityZone: nova-01
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- ioConnectivity:
- accessProtocol: iscsi
- maxIOPS: 3000000
- maxBWS: 300
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 500ms
diff --git a/examples/driver/huawei_oceanstor_block.yaml b/examples/driver/huawei_oceanstor_block.yaml
deleted file mode 100644
index 0acdec051..000000000
--- a/examples/driver/huawei_oceanstor_block.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-authOptions:
- username: "root"
- password: "Admin@123"
- vstoreName: ""
- # Whether to encrypt the password. If enabled, the value of the password must be ciphertext.
- EnableEncrypted: false
- # Encryption and decryption tool. Default value is aes. The decryption tool can only decrypt the corresponding ciphertext.
- PwdEncrypter: "aes"
- endpoints: "https://0.0.0.0:8088/deviceManager/rest"
-
-pool:
- opensds_block:
- storageType: block
- availabilityZone: default
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- ioConnectivity:
- accessProtocol: iscsi
- maxIOPS: 7000000
- maxBWS: 600
- advanced:
- diskType: SSD
- latency: 3ms
diff --git a/examples/driver/lvm.yaml b/examples/driver/lvm.yaml
deleted file mode 100755
index db8d2f211..000000000
--- a/examples/driver/lvm.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2018 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-tgtBindIp: 127.0.0.1
-pool:
- vg001:
- storageType: block
- availabilityZone: default
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- ioConnectivity:
- accessProtocol: iscsi
- maxIOPS: 7000000
- maxBWS: 600
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 5ms
diff --git a/examples/driver/manila.yaml b/examples/driver/manila.yaml
deleted file mode 100644
index 985ca78de..000000000
--- a/examples/driver/manila.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-authOptions:
- endpoint: "http://10.10.3.101/identity"
- domainName: "Default"
- username: "admin"
- password: "opensds"
- # Whether to encrypt the password. If enabled, the value of the password must be ciphertext.
- enableEncrypted: false
- # Encryption and decryption tool. Default value is aes. The decryption tool can only decrypt the corresponding ciphertext.
- pwdEncrypter: "aes"
- tenantName: "admin"
-pool:
- pool1:
- storageType: file
- availabilityZone: default
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- storageAccessCapability:
- - Read
- - Write
- ioConnectivity:
- accessProtocol: NFS
- maxIOPS: 7000000
- maxBWS: 600
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 3ms
diff --git a/examples/driver/netapp_ontap_san.yaml b/examples/driver/netapp_ontap_san.yaml
deleted file mode 100644
index 6c7d0751c..000000000
--- a/examples/driver/netapp_ontap_san.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-backendOptions:
- version: 1
- username: "admin"
- password: "password"
- storageDriverName: "ontap-san"
- managementLIF: "127.0.0.1"
- dataLIF: "127.0.0.1"
- svm: "vserver"
- igroupName: "opensds"
-pool:
- ontap-pool:
- storageType: block
- availabilityZone: default
- multiAttach: true
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- ioConnectivity:
- accessProtocol: iscsi
diff --git a/examples/driver/nfs.yaml b/examples/driver/nfs.yaml
deleted file mode 100644
index 4f4fd4241..000000000
--- a/examples/driver/nfs.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-tgtBindIp: 100.64.40.97
-tgtConfDir: /etc/tgt/conf.d
-pool:
- opensds-files-default:
- diskType: NL-SAS
- availabilityZone: default
- multiAttach: true
- storageType: file
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- storageAccessCapability:
- - Read
- - Write
- - Execute
- ioConnectivity:
- accessProtocol: nfs
- maxIOPS: 7000000
- maxBWS: 600
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 5ms
diff --git a/examples/driver/spectrumscale.yaml b/examples/driver/spectrumscale.yaml
deleted file mode 100644
index b6d143050..000000000
--- a/examples/driver/spectrumscale.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-tgtBindIp: 127.0.0.1
-username: root
-password: ibm
-port: 2022
-pool:
- SILVER:
- storageType: block
- availabilityZone: default
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- ioConnectivity:
- accessProtocol: iscsi
- maxIOPS: 7000000
- maxBWS: 600
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 5ms
diff --git a/examples/opensds.conf b/examples/opensds.conf
deleted file mode 100755
index 4d2d2e810..000000000
--- a/examples/opensds.conf
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2018 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-[osdsapiserver]
-api_endpoint = localhost:50040
-auth_strategy = keystone
-# If https is enabled, the default value of cert file
-# is /opt/opensds-security/opensds/opensds-cert.pem,
-# and key file is /opt/opensds-security/opensds/opensds-key.pem
-https_enabled = False
-beego_https_cert_file =
-beego_https_key_file =
-# Encryption and decryption tool. Default value is aes.
-password_decrypt_tool = aes
-
-[osdslet]
-api_endpoint = localhost:50049
-
-[osdsdock]
-api_endpoint = localhost:50050
-# Choose the type of dock resource, only support 'provisioner' and 'attacher'.
-dock_type = provisioner
-# Specify which backends should be enabled, sample,ceph,cinder,lvm and so on.
-enabled_backends = sample
-
-[sample]
-name = sample
-description = Sample Test
-driver_name = sample
-
-[ceph]
-name = ceph
-description = Ceph Test
-driver_name = ceph
-config_path = /etc/opensds/driver/ceph.yaml
-
-[cinder]
-name = cinder
-description = Cinder Test
-driver_name = cinder
-config_path = /etc/opensds/driver/cinder.yaml
-
-[lvm]
-name = lvm
-description = LVM Test
-driver_name = lvm
-config_path = /etc/opensds/driver/lvm.yaml
-host_based_replication_driver = drbd
-
-[spectrumscale]
-name = spectrumscale
-description = IBM TEST
-driver_name = spectrumscale
-config_path = /etc/opensds/driver/spectrumscale.yaml
-
-[huawei_oceanstor_block]
-name = oceanstor
-description = oceanstor Test
-driver_name = huawei_oceanstor_block
-config_path = /etc/opensds/driver/huawei_oceanstor_block.yaml
-replication_type = array_based
-
-[huawei_fusionstorage]
-name = fusionstorage backend
-description = This is a fusionstorage backend service
-driver_name = huawei_fusionstorage
-config_path = /etc/opensds/driver/fusionstorage.yaml
-
-[netapp_ontap_san]
-name = netapp ontap san backend
-description = netapp ontap Test
-driver_name = netapp_ontap_san
-config_path = /etc/opensds/driver/netapp_ontap_san.yaml
-
-[database]
-endpoint = localhost:2379,localhost:2380
-driver = etcd
-username = username
-password = password
-enableTLS = False
-cert_file = /etc/etcd/server.crt
-key_file = /etc/etcd/server.key
-ca_file = /etc/etcd/ca.crt
-allowClientAuth = False
diff --git a/examples/policy.json b/examples/policy.json
deleted file mode 100644
index b6934e2b9..000000000
--- a/examples/policy.json
+++ /dev/null
@@ -1,70 +0,0 @@
-{
- "admin_or_owner": "is_admin:True or (role:admin and is_admin_project:True) or tenant_id:%(tenant_id)s",
- "default": "rule:admin_or_owner",
- "admin_api": "is_admin:True or (role:admin and is_admin_project:True)",
-
-
- "profile:create":"rule:admin_api",
- "profile:list":"",
- "profile:get":"",
- "profile:update":"rule:admin_api",
- "profile:delete":"rule:admin_api",
- "profile:add_custom_property": "rule:admin_api",
- "profile:list_custom_properties": "",
- "profile:remove_custom_property": "rule:admin_api",
- "volume:create": "rule:admin_or_owner",
- "volume:list": "rule:admin_or_owner",
- "volume:get": "rule:admin_or_owner",
- "volume:update": "rule:admin_or_owner",
- "volume:extend": "rule:admin_or_owner",
- "volume:delete": "rule:admin_or_owner",
- "volume:create_attachment": "rule:admin_or_owner",
- "volume:list_attachments": "rule:admin_or_owner",
- "volume:get_attachment": "rule:admin_or_owner",
- "volume:update_attachment": "rule:admin_or_owner",
- "volume:delete_attachment": "rule:admin_or_owner",
- "snapshot:create": "rule:admin_or_owner",
- "snapshot:list": "rule:admin_or_owner",
- "snapshot:get": "rule:admin_or_owner",
- "snapshot:update": "rule:admin_or_owner",
- "snapshot:delete": "rule:admin_or_owner",
- "dock:list": "rule:admin_api",
- "dock:get": "rule:admin_api",
- "pool:list": "rule:admin_api",
- "pool:get": "rule:admin_api",
- "replication:create": "rule:admin_or_owner",
- "replication:list": "rule:admin_or_owner",
- "replication:list_detail": "rule:admin_or_owner",
- "replication:get": "rule:admin_or_owner",
- "replication:update": "rule:admin_or_owner",
- "replication:delete": "rule:admin_or_owner",
- "replication:enable": "rule:admin_or_owner",
- "replication:disable": "rule:admin_or_owner",
- "replication:failover": "rule:admin_or_owner",
- "volume_group:create": "rule:admin_or_owner",
- "volume_group:list": "rule:admin_or_owner",
- "volume_group:get": "rule:admin_or_owner",
- "volume_group:update": "rule:admin_or_owner",
- "volume_group:delete": "rule:admin_or_owner",
- "availability_zone:list":"",
- "metrics:get": "rule:admin_or_owner",
- "metrics:collect": "rule:admin_or_owner",
- "metrics:uploadconf": "rule:admin_api",
- "metrics:downloadconf": "rule:admin_api",
- "metrics:urls": "rule:admin_or_owner",
- "fileshare:create": "rule:admin_or_owner",
- "fileshare:delete": "rule:admin_or_owner",
- "fileshare:list": "rule:admin_or_owner",
- "fileshare:get": "rule:admin_or_owner",
- "fileshare:update": "rule:admin_or_owner",
- "fileshareacl:create": "rule:admin_or_owner",
- "fileshareacl:delete": "rule:admin_or_owner",
- "fileshareacl:list": "rule:admin_or_owner",
- "fileshareacl:get": "rule:admin_or_owner",
- "host:update": "rule:admin_or_owner",
- "host:create": "rule:admin_or_owner",
- "host:delete": "rule:admin_or_owner",
- "host:list": "rule:admin_or_owner",
- "host:get": "rule:admin_or_owner",
- "host:update": "rule:admin_or_owner"
-}
diff --git a/go.mod b/go.mod
index 6f3dab326..8ea2ecee0 100644
--- a/go.mod
+++ b/go.mod
@@ -1,4 +1,4 @@
-module github.com/opensds/opensds
+module github.com/sodafoundation/controller
go 1.12
diff --git a/go.sum b/go.sum
index f75f938a3..778cb2d1f 100644
--- a/go.sum
+++ b/go.sum
@@ -5,6 +5,7 @@ github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8L
github.com/LINBIT/godrbdutils v0.0.0-20180425110027-65b98a0f103a h1:eYm/KPAarghZq5BGxqEWyOcrIdA3lDya2wDDzIVgRe8=
github.com/LINBIT/godrbdutils v0.0.0-20180425110027-65b98a0f103a/go.mod h1:BwaNH2Y7xU4sn0OJj7EMT2ZkUjhBg3Lzp6Hae/0g2+Q=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OwnLocal/goes v1.0.0/go.mod h1:8rIFjBGTue3lCU0wplczcUgt9Gxgrkkrw7etMIcn8TM=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/RoaringBitmap/roaring v0.4.21 h1:WJ/zIlNX4wQZ9x8Ey33O1UaD9TCTakYsdLFSBcTwH+8=
@@ -16,6 +17,7 @@ github.com/appleboy/easyssh-proxy v1.2.0 h1:KvaUGC18WkBFet+N1oofQy03jkC5HaKFn2XG
github.com/appleboy/easyssh-proxy v1.2.0/go.mod h1:vHskChUNhxwW4dXMe2MNE/k+UBCkBagrQDm70UWZrS0=
github.com/astaxie/beego v1.11.1 h1:6DESefxW5oMcRLFRKi53/6exzup/IR6N4EzzS1n6CnQ=
github.com/astaxie/beego v1.11.1/go.mod h1:i69hVzgauOPSw5qeyF4GVZhn7Od0yG5bbCGzmhbWxgQ=
+github.com/astaxie/beego v1.12.0/go.mod h1:fysx+LZNZKnvh4GED/xND7jWtjCR6HzydR2Hh2Im57o=
github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd/go.mod h1:1b+Y/CofkYwXMUU0OhQqGvsY2Bvgr4j6jfT699wyZKQ=
github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542/go.mod h1:kSeGC/p1AbBiEp5kat81+DSQrZenVBZXklMLaELspWU=
github.com/belogik/goes v0.0.0-20151229125003-e54d722c3aff/go.mod h1:PhH1ZhyCzHKt4uAasyx+ljRCgoezetRNf59CUtwUkqY=
diff --git a/install/CI/coverage b/install/CI/coverage
index 49324428d..042da4d04 100755
--- a/install/CI/coverage
+++ b/install/CI/coverage
@@ -17,8 +17,8 @@
set -e
echo "" > coverage.txt
-MODEL_PACKAGE=github.com/opensds/opensds/pkg/model
-PROTOBUF_PACKAGE=github.com/opensds/opensds/pkg/model/proto
+MODEL_PACKAGE=github.com/sodafoundation/controller/pkg/model
+PROTOBUF_PACKAGE=github.com/sodafoundation/controller/pkg/model/proto
for testpkg in $(go list ./osdsctl/... ./client/... ./pkg/... ./contrib/...); do
test $testpkg == "$MODEL_PACKAGE" && continue
diff --git a/install/CI/test b/install/CI/test
index ddb1ef228..e2d5a7b3d 100755
--- a/install/CI/test
+++ b/install/CI/test
@@ -39,26 +39,23 @@ fi
# Start unit test.
split_line "Start unit test"
-go test -v github.com/opensds/opensds/osdsctl/... -cover
-go test -v github.com/opensds/opensds/client/... -cover
-go test -v github.com/opensds/opensds/pkg/... -cover
-go test -v github.com/opensds/opensds/contrib/... -cover
-
-# Start integration test.
-split_line "Start integration test"
-sudo $OPENSDS_DIR/test/integration/prepare.sh
-go test -v github.com/opensds/opensds/test/integration/... -tags integration
-sudo killall -9 osdsapiserver osdslet osdsdock
-sudo rm /etc/opensds/opensds.conf -rf
-
-# Start lvm e2e test
-split_line "Start lvm e2e test"
-sudo $OPENSDS_DIR/install/devsds/install.sh
-ps -ef|grep osds
-go test -v github.com/opensds/opensds/test/e2e/... -tags e2e
-
-# Start lvm e2e flow test
-split_line "Start lvm e2e flow test"
-go build -o ./test/e2e/volume-connector github.com/opensds/opensds/test/e2e/connector/
-go test -v github.com/opensds/opensds/test/e2e/... -tags e2ef
-sudo $OPENSDS_DIR/install/devsds/uninstall.sh
+go test -v github.com/sodafoundation/controller/pkg/... -cover
+
+## Start integration test.
+#split_line "Start integration test"
+#sudo $OPENSDS_DIR/test/integration/prepare.sh
+#go test -v github.com/sodafoundation/controller/test/integration/... -tags integration
+#sudo killall -9 osdsapiserver osdslet osdsdock
+#sudo rm /etc/opensds/opensds.conf -rf
+#
+## Start lvm e2e test
+#split_line "Start lvm e2e test"
+#sudo $OPENSDS_DIR/install/devsds/install.sh
+#ps -ef|grep osds
+#go test -v github.com/sodafoundation/controller/test/e2e/... -tags e2e
+#
+## Start lvm e2e flow test
+#split_line "Start lvm e2e flow test"
+#go build -o ./test/e2e/volume-connector github.com/sodafoundation/controller/test/e2e/connector/
+#go test -v github.com/sodafoundation/controller/test/e2e/... -tags e2ef
+#sudo $OPENSDS_DIR/install/devsds/uninstall.sh
diff --git a/install/devsds/bootstrap.sh b/install/devsds/bootstrap.sh
deleted file mode 100755
index 258266713..000000000
--- a/install/devsds/bootstrap.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/bash
-
-# Copyright 2017 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script helps new contributors or users set up their local workstation
-# for opensds installation and development.
-
-# Temporary directory
-OPT_DIR=/opt/opensds
-mkdir -p $OPT_DIR
-
-# Golang version
-MINIMUM_GO_VERSION=${MINIMUM_GO_VERSION:-go1.11.1}
-GOENV_PROFILE=${GOENV_PROFILE:-/etc/profile.d/goenv.sh}
-
-# Log file
-LOG_DIR=/var/log/opensds
-LOGFILE=${LOGFILE:-/var/log/opensds/bootstrap.log}
-mkdir -p $LOG_DIR
-
-# Log function
-log() {
- DATE=`date "+%Y-%m-%d %H:%M:%S"`
- USER=$(whoami)
- echo "${DATE} [INFO] $@"
- echo "${DATE} ${USER} execute $0 [INFO] $@" > $LOGFILE
-}
-
-log_error ()
-{
- DATE=`date "+%Y-%m-%d %H:%M:%S"`
- USER=$(whoami)
- echo "${DATE} [ERROR] $@" 2>&1
- echo "${DATE} ${USER} execute $0 [ERROR] $@" > $LOGFILE
-}
-log OpenSDS bootstrap starting ...
-
-# load profile
-source /etc/profile
-
-# if not found, install it.
-if [[ -z "$(which go)" ]]; then
- log "Golang is not installed, downloading..."
- wget https://storage.googleapis.com/golang/${MINIMUM_GO_VERSION}.linux-amd64.tar.gz -O $OPT_DIR/${MINIMUM_GO_VERSION}.linux-amd64.tar.gz > /dev/null
- log "tar xzf $OPT_DIR/${MINIMUM_GO_VERSION}.linux-amd64.tar.gz -C /usr/local/"
- tar xzf $OPT_DIR/${MINIMUM_GO_VERSION}.linux-amd64.tar.gz -C /usr/local/
- echo 'export GOROOT=/usr/local/go' > $GOENV_PROFILE
- echo 'export GOPATH=$HOME/go' >> $GOENV_PROFILE
- echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> $GOENV_PROFILE
- source $GOENV_PROFILE
-fi
-
-# verify go version
-IFS=" " read -ra go_version <<< "$(go version)"
-if [[ "${MINIMUM_GO_VERSION}" != $(echo -e "${MINIMUM_GO_VERSION}\n${go_version[2]}" | sort -s -t. -k 1,1 -k 2,2n -k 3,3n | head -n1) && "${go_version[2]}" != "devel" ]]; then
- log_error "Detected go version: ${go_version[*]}, OpenSDS requires ${MINIMUM_GO_VERSION} or greater."
- log_error "Please remove golang old version ${go_version[2]}, bootstrap will install ${MINIMUM_GO_VERSION} automatically"
- exit 2
-fi
-
-GOPATH=${GOPATH:-$HOME/go}
-OPENSDS_ROOT=${GOPATH}/src/github.com/opensds
-OPENSDS_DIR=${GOPATH}/src/github.com/opensds/opensds
-mkdir -p ${OPENSDS_ROOT}
-
-cd ${OPENSDS_ROOT}
-if [ ! -d ${OPENSDS_DIR} ]; then
- log "Downloading the OpenSDS source code..."
- git clone https://github.com/opensds/opensds.git -b master
-fi
-
-# make sure 'make' has been installed.
-if [[ -z "$(which make)" ]]; then
- log "Installing make ..."
- sudo apt-get install make -y
-fi
-
-cd ${OPENSDS_DIR}
-if [ ! -d ${OPENSDS_DIR}/build ]; then
- log "Building OpenSDS ..."
- make ubuntu-dev-setup
- make
-fi
-
-log OpenSDS bootstrapped successfully. you can execute 'source /etc/profile' to load golang ENV.
diff --git a/install/devsds/install.sh b/install/devsds/install.sh
deleted file mode 100755
index aa0adce95..000000000
--- a/install/devsds/install.sh
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/bin/bash
-
-# Copyright 2017 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# default backend list
-OPENSDS_BACKEND_LIST=${OPENSDS_BACKEND_LIST:-lvm}
-
-osds::usage(){
- cat << OSDS_HELP_INFO_DOC
-Usage:
- $(basename $0) [-h|--help]
-Flags:
- -h, --help Print this information.
-To self-define install configuration, you can edit local.conf, here is config item blow:
- OPENSDS_AUTH_STRATEGY: Authentication strategy, value can be keystone, noauth.
- OPENSDS_BACKEND_LIST: Storage backend list, separated by a comma, support lvm right now.
- HOST_IP: It is used to service ip binding, including osdslet, osdsdock, etcd, keystone etc.
-OSDS_HELP_INFO_DOC
-}
-
-# Parse parameter first
-case "$# $1" in
- "0 ")
- echo "Starting install..."
- ;;
- "1 -h"|"2 --help")
- osds::usage
- exit 0
- ;;
- *)
- osds::usage
- exit 1
- ;;
-esac
-
-
-osds::backendlist_check(){
- local backendlist=$1
- for backend in $(echo $backendlist | tr "," " ");do
- case $backend in
- lvm|ceph|nfs)
- ;;
- *)
- echo "Error: backends must be one of lvm,ceph" >&2
- exit -1
- ;;
- esac
- done
-}
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following along as the install occurs.
-set -o xtrace
-set -o errexit
-
-# Keep track of the script directory
-TOP_DIR=$(cd $(dirname "$0") && pwd)
-# OpenSDS source code root directory
-OPENSDS_DIR=$(cd $TOP_DIR/../.. && pwd)
-# OpenSDS configuration directory
-OPENSDS_CONFIG_DIR=${OPENSDS_CONFIG_DIR:-/etc/opensds}
-OPENSDS_DRIVER_CONFIG_DIR=${OPENSDS_CONFIG_DIR}/driver
-
-# Export openssl config file as environment variable
-export OPENSSL_CONF="${TOP_DIR}"/lib/openssl.cnf
-
-mkdir -p $OPENSDS_DRIVER_CONFIG_DIR
-
-# Temporary directory for testing
-OPT_DIR=/opt/opensds
-OPT_BIN=$OPT_DIR/bin
-mkdir -p $OPT_BIN
-export PATH=$OPT_BIN:$PATH
-
-# Echo text to the log file, summary log file and stdout
-# osds::echo_summary "something to say"
-function osds::echo_summary {
- echo -e $@ >&6
-}
-
-# Echo text only to stdout, no log files
-# osds::echo_nolog "something not for the logs"
-function osds::echo_nolog {
- echo $@ >&3
-}
-
-# Log file
-LOGFILE=/var/log/opensds/devsds.log
-TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
-LOGDAYS=${LOGDAYS:-7}
-CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT")
-
-# Clean up old log files. Append '.*' to the user-specified
-# ``LOGFILE`` to match the date in the search template.
-LOGFILE_DIR="${LOGFILE%/*}" # dirname
-LOGFILE_NAME="${LOGFILE##*/}" # basename
-mkdir -p $LOGFILE_DIR
-find $LOGFILE_DIR -maxdepth 1 -name $LOGFILE_NAME.\* -mtime +$LOGDAYS -exec rm {} \;
-LOGFILE=$LOGFILE.${CURRENT_LOG_TIME}
-SUMFILE=$LOGFILE.summary.${CURRENT_LOG_TIME}
-
-# Before set log output, make sure python has already been installed.
-if [[ -z "$(which python)" ]]; then
- python_path=${python_path:-}
- test -n "$(which python2)" && python_path=$(which python2)
- test -n "$(which python3)" && python_path=$(which python3)
- if [[ -z $python_path ]]; then
- log_error "Can not find python, please install it."
- exit 2
- fi
- ln -s $python_path /usr/bin/python
-fi
-
-# Set fd 3 to a copy of stdout. So we can set fd 1 without losing
-# stdout later.
-exec 3>&1
-# Set fd 1 and 2 to write the log file
-exec 1> >( $TOP_DIR/tools/outfilter.py -v -o "${LOGFILE}" ) 2>&1
-# Set fd 6 to summary log file
-exec 6> >( $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" )
-
-osds::echo_summary "install.sh log $LOGFILE"
-
-# Specified logfile name always links to the most recent log
-ln -sf $LOGFILE $LOGFILE_DIR/$LOGFILE_NAME
-ln -sf $SUMFILE $LOGFILE_DIR/$LOGFILE_NAME.summary
-
-source $TOP_DIR/lib/util.sh
-source $TOP_DIR/sdsrc
-
-osds::backendlist_check $OPENSDS_BACKEND_LIST
-
-# clean up opensds.conf
-:> $OPENSDS_CONFIG_DIR/opensds.conf
-
-# Install service which is enabled.
-osds::util::serice_operation install
-
-# Fin
-# ===
-
-set +o xtrace
-
-if [[ -n "$LOGFILE" ]]; then
- exec 1>&3
- # Force all output to stdout and logs now
- exec 1> >( tee -a "${LOGFILE}" ) 2>&1
-else
- # Force all output to stdout now
- exec 1>&3
-fi
-
-echo
-echo "Execute commands blow to set up ENVs which are needed by OpenSDS CLI:"
-echo "------------------------------------------------------------------"
-echo "export OPENSDS_AUTH_STRATEGY=$OPENSDS_AUTH_STRATEGY"
-echo "export OPENSDS_ENDPOINT=http://localhost:50040"
-if osds::util::is_service_enabled keystone; then
- if [ "true" == $USE_CONTAINER_KEYSTONE ]
- then
- echo "export OS_AUTH_URL=http://$KEYSTONE_IP/identity"
- echo "export OS_USERNAME=admin"
- echo "export OS_PASSWORD=opensds@123"
- echo "export OS_TENANT_NAME=admin"
- echo "export OS_PROJECT_NAME=admin"
- echo "export OS_USER_DOMAIN_ID=default"
- else
- echo "source $DEV_STACK_DIR/openrc admin admin"
- fi
-fi
-echo "------------------------------------------------------------------"
-echo "Enjoy it !!"
-echo
-
-# Restore/close logging file descriptors
-exec 1>&3
-exec 2>&3
-exec 3>&-
-exec 6>&-
-
-echo
diff --git a/install/devsds/lib/ceph.sh b/install/devsds/lib/ceph.sh
deleted file mode 100755
index c6ade448f..000000000
--- a/install/devsds/lib/ceph.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-# Copyright 2017 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Save trace setting
-_XTRACE_CEPH=$(set +o | grep xtrace)
-set +o xtrace
-
-osds::ceph::install(){
- : # TODO
-}
-
-osds::ceph::cleanup() {
- : # TODO
-}
-
-osds::ceph::uninstall(){
- : # TODO
-}
-
-osds::ceph::uninstall_purge(){
- : # TODO
-}
-
-# Restore xtrace
-$_XTRACE_CEPH
diff --git a/install/devsds/lib/certificate.sh b/install/devsds/lib/certificate.sh
deleted file mode 100755
index 0c2caba9f..000000000
--- a/install/devsds/lib/certificate.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Save trace setting
-_XTRACE_CEPH=$(set +o | grep xtrace)
-set +o xtrace
-
-COMPONENT=("opensds" "nbp")
-OPENSDS_CERT_DIR="/opt/opensds-security"
-ROOT_CERT_DIR=${ROOT_CERT_DIR:-"${OPENSDS_CERT_DIR}"/ca}
-
-osds::certificate::install(){
- osds::certificate::cleanup
- osds::certificate::check_openssl_installed
- osds::certificate::prepare
- osds::certificate::create_ca_cert
- osds::certificate::create_component_cert
-}
-
-osds::certificate::cleanup() {
- osds::certificate::uninstall
-}
-
-osds::certificate::uninstall(){
- if [ -d "${OPENSDS_CERT_DIR}" ];then
- rm -rf "${OPENSDS_CERT_DIR}"
- fi
-}
-
-osds::certificate::uninstall_purge(){
- osds::certificate::uninstall
-}
-
-osds::certificate::check_openssl_installed(){
- openssl version >& /dev/null
- if [ $? -ne 0 ];then
- echo "Failed to run openssl. Please ensure openssl is installed."
- exit 1
- fi
-}
-
-osds::certificate::prepare(){
- # Prepare to generate certs
- mkdir -p "${ROOT_CERT_DIR}"
- mkdir -p "${ROOT_CERT_DIR}"/demoCA/
- mkdir -p "${ROOT_CERT_DIR}"/demoCA/newcerts
- touch "${ROOT_CERT_DIR}"/demoCA/index.txt
- echo "01" > "${ROOT_CERT_DIR}"/demoCA/serial
- echo "unique_subject = no" > "${ROOT_CERT_DIR}"/demoCA/index.txt.attr
-}
-
-osds::certificate::create_ca_cert(){
- # Create ca cert
- cd "${ROOT_CERT_DIR}"
- openssl genrsa -passout pass:xxxxx -out "${ROOT_CERT_DIR}"/ca-key.pem -aes256 2048
- openssl req -new -x509 -sha256 -key "${ROOT_CERT_DIR}"/ca-key.pem -out "${ROOT_CERT_DIR}"/ca-cert.pem -days 365 -subj "/CN=CA" -passin pass:xxxxx
-}
-
-osds::certificate::create_component_cert(){
- # Create component cert
- for com in ${COMPONENT[*]};do
- openssl genrsa -aes256 -passout pass:xxxxx -out "${ROOT_CERT_DIR}"/"${com}"-key.pem 2048
- openssl req -new -sha256 -key "${ROOT_CERT_DIR}"/"${com}"-key.pem -out "${ROOT_CERT_DIR}"/"${com}"-csr.pem -days 365 -subj "/CN=${com}" -passin pass:xxxxx
- openssl ca -batch -in "${ROOT_CERT_DIR}"/"${com}"-csr.pem -cert "${ROOT_CERT_DIR}"/ca-cert.pem -keyfile "${ROOT_CERT_DIR}"/ca-key.pem -out "${ROOT_CERT_DIR}"/"${com}"-cert.pem -md sha256 -days 365 -passin pass:xxxxx
-
- # Cancel the password for the private key
- openssl rsa -in "${ROOT_CERT_DIR}"/"${com}"-key.pem -out "${ROOT_CERT_DIR}"/"${com}"-key.pem -passin pass:xxxxx
-
- mkdir -p "${OPENSDS_CERT_DIR}"/"${com}"
- mv "${ROOT_CERT_DIR}"/"${com}"-key.pem "${OPENSDS_CERT_DIR}"/"${com}"/
- mv "${ROOT_CERT_DIR}"/"${com}"-cert.pem "${OPENSDS_CERT_DIR}"/"${com}"/
- rm -rf "${ROOT_CERT_DIR}"/"${com}"-csr.pem
- done
-
- rm -rf "${ROOT_CERT_DIR}"/demoCA
-}
-
-# Restore xtrace
-$_XTRACE_CEPH
diff --git a/install/devsds/lib/etcd.sh b/install/devsds/lib/etcd.sh
deleted file mode 100755
index dd94b6968..000000000
--- a/install/devsds/lib/etcd.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-
-# Copyright 2017 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# A set of helpers for starting/running etcd for tests
-_XTRACE_ETCD=$(set +o | grep xtrace)
-set +o xtrace
-
-osds::etcd::stop() {
- kill "$(cat $ETCD_DIR/etcd.pid)" >/dev/null 2>&1 || :
- wait "$(cat $ETCD_DIR/etcd.pid)" >/dev/null 2>&1 || :
-}
-
-osds::etcd::clean_etcd_dir() {
- rm -rf "${ETCD_DIR-}"
-}
-
-osds::etcd::download() {
- (
- cd "${OPT_DIR}"
- url="https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz"
- download_file="etcd-v${ETCD_VERSION}-linux-amd64.tar.gz"
- osds::util::download_file "${url}" "${download_file}"
- tar xzf "${download_file}"
- cp etcd-v${ETCD_VERSION}-linux-amd64/etcd bin
- cp etcd-v${ETCD_VERSION}-linux-amd64/etcdctl bin
- )
-}
-
-osds::etcd::install() {
- # validate before running
- if [ ! -f "${OPT_DIR}/bin/etcd" ]; then
- osds::etcd::download
- fi
-
- # Start etcd
- mkdir -p $ETCD_DIR
- nohup ${OPT_DIR}/bin/etcd --advertise-client-urls http://${ETCD_HOST}:${ETCD_PORT} --listen-client-urls http://${ETCD_HOST}:${ETCD_PORT}\
- --listen-peer-urls http://${ETCD_HOST}:${ETCD_PEER_PORT} --data-dir ${ETCD_DATADIR} --debug 2> "${ETCD_LOGFILE}" >/dev/null &
- echo $! > $ETCD_DIR/etcd.pid
-
- osds::echo_summary "Waiting for etcd to come up."
- osds::util::wait_for_url "http://${ETCD_HOST}:${ETCD_PORT}/v2/machines" "etcd: " 0.25 80
- curl -fs -X PUT "http://${ETCD_HOST}:${ETCD_PORT}/v2/keys/_test"
-}
-
-osds::etcd::cleanup() {
- osds::etcd::stop
- osds::etcd::clean_etcd_dir
-}
-
-osds::etcd::uninstall(){
- : # do nothing
-}
-
-osds::etcd::uninstall_purge(){
- : # do nothing
-}
-
-# Restore xtrace
-$_XTRACE_ETCD
diff --git a/install/devsds/lib/keystone.policy.json b/install/devsds/lib/keystone.policy.json
deleted file mode 100644
index 7a2c1c17c..000000000
--- a/install/devsds/lib/keystone.policy.json
+++ /dev/null
@@ -1,260 +0,0 @@
-{
- "admin_required": "role:admin",
- "cloud_admin": "role:admin and (is_admin_project:True or domain_id:admin_domain_id)",
- "service_role": "role:service",
- "service_or_admin": "rule:admin_required or rule:service_role",
- "owner": "user_id:%(user_id)s or user_id:%(target.token.user_id)s",
- "admin_or_owner": "(rule:admin_required and domain_id:%(target.token.user.domain.id)s) or rule:owner",
- "admin_and_matching_domain_id": "rule:admin_required and domain_id:%(domain_id)s",
- "service_admin_or_owner": "rule:service_or_admin or rule:owner",
-
- "default": "rule:admin_required",
-
- "identity:get_region": "",
- "identity:list_regions": "",
- "identity:create_region": "rule:cloud_admin",
- "identity:update_region": "rule:cloud_admin",
- "identity:delete_region": "rule:cloud_admin",
-
- "identity:get_service": "rule:admin_required",
- "identity:list_services": "rule:admin_required",
- "identity:create_service": "rule:cloud_admin",
- "identity:update_service": "rule:cloud_admin",
- "identity:delete_service": "rule:cloud_admin",
-
- "identity:get_endpoint": "rule:admin_required",
- "identity:list_endpoints": "rule:admin_required",
- "identity:create_endpoint": "rule:cloud_admin",
- "identity:update_endpoint": "rule:cloud_admin",
- "identity:delete_endpoint": "rule:cloud_admin",
-
- "identity:get_registered_limit": "",
- "identity:list_registered_limits": "",
- "identity:create_registered_limits": "rule:admin_required",
- "identity:update_registered_limits": "rule:admin_required",
- "identity:delete_registered_limit": "rule:admin_required",
-
- "identity:get_limit": "",
- "identity:list_limits": "",
- "identity:create_limits": "rule:admin_required",
- "identity:update_limits": "rule:admin_required",
- "identity:delete_limit": "rule:admin_required",
-
- "identity:get_domain": "rule:cloud_admin or rule:admin_and_matching_domain_id or token.project.domain.id:%(target.domain.id)s",
- "identity:list_domains": "rule:cloud_admin",
- "identity:create_domain": "rule:cloud_admin",
- "identity:update_domain": "rule:cloud_admin",
- "identity:delete_domain": "rule:cloud_admin",
-
- "admin_and_matching_target_project_domain_id": "rule:admin_required and domain_id:%(target.project.domain_id)s",
- "admin_and_matching_project_domain_id": "rule:admin_required and domain_id:%(project.domain_id)s",
- "identity:get_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id or project_id:%(target.project.id)s",
- "identity:list_projects": "rule:cloud_admin or rule:admin_and_matching_domain_id",
- "identity:list_user_projects": "rule:owner or rule:admin_and_matching_domain_id",
- "identity:create_project": "rule:cloud_admin or rule:admin_and_matching_project_domain_id",
- "identity:update_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id",
- "identity:delete_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id",
- "identity:create_project_tag": "rule:admin_required",
- "identity:delete_project_tag": "rule:admin_required",
- "identity:get_project_tag": "rule:admin_required",
- "identity:list_project_tags": "rule:admin_required",
- "identity:delete_project_tags": "rule:admin_required",
- "identity:update_project_tags": "rule:admin_required",
-
- "admin_and_matching_target_user_domain_id": "rule:admin_required and domain_id:%(target.user.domain_id)s",
- "admin_and_matching_user_domain_id": "rule:admin_required and domain_id:%(user.domain_id)s",
- "identity:get_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id or rule:owner",
- "identity:list_users": "rule:cloud_admin or rule:admin_and_matching_domain_id",
- "identity:create_user": "rule:cloud_admin or rule:admin_and_matching_user_domain_id",
- "identity:update_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id",
- "identity:delete_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id",
-
- "admin_and_matching_target_group_domain_id": "rule:admin_required and domain_id:%(target.group.domain_id)s",
- "admin_and_matching_group_domain_id": "rule:admin_required and domain_id:%(group.domain_id)s",
- "identity:get_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
- "identity:list_groups": "rule:cloud_admin or rule:admin_and_matching_domain_id",
- "identity:list_groups_for_user": "rule:owner or rule:admin_and_matching_target_user_domain_id",
- "identity:create_group": "rule:cloud_admin or rule:admin_and_matching_group_domain_id",
- "identity:update_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
- "identity:delete_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
- "identity:list_users_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
- "identity:remove_user_from_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
- "identity:check_user_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
- "identity:add_user_to_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
-
- "identity:list_credentials": "",
- "identity:get_credential": "rule:all_admins or user_id:%(target.credential.user_id)s",
- "identity:create_credential": "rule:all_admins or user_id:%(credential.user_id)s",
- "identity:delete_credential": "rule:all_admins or user_id:%(target.credential.user_id)s",
- "identity:update_credential": "rule:all_admins or (user_id:%(target.credential.user_id)s and user_id:%(credential.user_id)s)",
-
- "identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
- "identity:ec2_list_credentials": "rule:admin_required or rule:owner",
- "identity:ec2_create_credential": "rule:admin_required or rule:owner",
- "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
-
- "identity:get_role": "rule:admin_required",
- "identity:list_roles": "rule:admin_required",
- "identity:create_role": "rule:cloud_admin",
- "identity:update_role": "rule:cloud_admin",
- "identity:delete_role": "rule:cloud_admin",
-
- "identity:get_domain_role": "rule:cloud_admin or rule:get_domain_roles",
- "identity:list_domain_roles": "rule:cloud_admin or rule:list_domain_roles",
- "identity:create_domain_role": "rule:cloud_admin or rule:domain_admin_matches_domain_role",
- "identity:update_domain_role": "rule:cloud_admin or rule:domain_admin_matches_target_domain_role",
- "identity:delete_domain_role": "rule:cloud_admin or rule:domain_admin_matches_target_domain_role",
- "domain_admin_matches_domain_role": "rule:admin_required and domain_id:%(role.domain_id)s",
- "get_domain_roles": "rule:domain_admin_matches_target_domain_role or rule:project_admin_matches_target_domain_role",
- "domain_admin_matches_target_domain_role": "rule:admin_required and domain_id:%(target.role.domain_id)s",
- "project_admin_matches_target_domain_role": "rule:admin_required and project_domain_id:%(target.role.domain_id)s",
- "list_domain_roles": "rule:domain_admin_matches_filter_on_list_domain_roles or rule:project_admin_matches_filter_on_list_domain_roles",
- "domain_admin_matches_filter_on_list_domain_roles": "rule:admin_required and domain_id:%(domain_id)s",
- "project_admin_matches_filter_on_list_domain_roles": "rule:admin_required and project_domain_id:%(domain_id)s",
- "admin_and_matching_prior_role_domain_id": "rule:admin_required and domain_id:%(target.prior_role.domain_id)s",
- "implied_role_matches_prior_role_domain_or_global": "(domain_id:%(target.implied_role.domain_id)s or None:%(target.implied_role.domain_id)s)",
-
- "identity:get_implied_role": "rule:cloud_admin or rule:admin_and_matching_prior_role_domain_id",
- "identity:list_implied_roles": "rule:cloud_admin or rule:admin_and_matching_prior_role_domain_id",
- "identity:create_implied_role": "rule:cloud_admin or (rule:admin_and_matching_prior_role_domain_id and rule:implied_role_matches_prior_role_domain_or_global)",
- "identity:delete_implied_role": "rule:cloud_admin or rule:admin_and_matching_prior_role_domain_id",
- "identity:list_role_inference_rules": "rule:cloud_admin",
- "identity:check_implied_role": "rule:cloud_admin or rule:admin_and_matching_prior_role_domain_id",
-
- "identity:list_system_grants_for_user": "rule:admin_required",
- "identity:check_system_grant_for_user": "rule:admin_required",
- "identity:create_system_grant_for_user": "rule:admin_required",
- "identity:revoke_system_grant_for_user": "rule:admin_required",
-
- "identity:list_system_grants_for_group": "rule:admin_required",
- "identity:check_system_grant_for_group": "rule:admin_required",
- "identity:create_system_grant_for_group": "rule:admin_required",
- "identity:revoke_system_grant_for_group": "rule:admin_required",
-
- "identity:check_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
- "identity:list_grants": "rule:cloud_admin or rule:domain_admin_for_list_grants or rule:project_admin_for_list_grants",
- "identity:create_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
- "identity:revoke_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
- "domain_admin_for_grants": "rule:domain_admin_for_global_role_grants or rule:domain_admin_for_domain_role_grants",
- "domain_admin_for_global_role_grants": "rule:admin_required and None:%(target.role.domain_id)s and rule:domain_admin_grant_match",
- "domain_admin_for_domain_role_grants": "rule:admin_required and domain_id:%(target.role.domain_id)s and rule:domain_admin_grant_match",
- "domain_admin_grant_match": "domain_id:%(domain_id)s or domain_id:%(target.project.domain_id)s",
- "project_admin_for_grants": "rule:project_admin_for_global_role_grants or rule:project_admin_for_domain_role_grants",
- "project_admin_for_global_role_grants": "rule:admin_required and None:%(target.role.domain_id)s and project_id:%(project_id)s",
- "project_admin_for_domain_role_grants": "rule:admin_required and project_domain_id:%(target.role.domain_id)s and project_id:%(project_id)s",
- "domain_admin_for_list_grants": "rule:admin_required and rule:domain_admin_grant_match",
- "project_admin_for_list_grants": "rule:admin_required and project_id:%(project_id)s",
-
- "admin_on_domain_filter": "rule:admin_required and domain_id:%(scope.domain.id)s",
- "admin_on_project_filter": "rule:admin_required and project_id:%(scope.project.id)s",
- "admin_on_domain_of_project_filter": "rule:admin_required and domain_id:%(target.project.domain_id)s",
- "identity:list_role_assignments": "rule:cloud_admin or rule:admin_on_domain_filter or rule:admin_on_project_filter",
- "identity:list_role_assignments_for_tree": "rule:cloud_admin or rule:admin_on_domain_of_project_filter",
- "identity:get_policy": "rule:cloud_admin",
- "identity:list_policies": "rule:cloud_admin",
- "identity:create_policy": "rule:cloud_admin",
- "identity:update_policy": "rule:cloud_admin",
- "identity:delete_policy": "rule:cloud_admin",
-
- "identity:check_token": "rule:admin_or_owner",
- "identity:validate_token": "rule:service_admin_or_owner",
- "identity:validate_token_head": "rule:service_or_admin",
- "identity:revocation_list": "rule:service_or_admin",
- "identity:revoke_token": "rule:admin_or_owner",
-
- "identity:create_trust": "user_id:%(trust.trustor_user_id)s",
- "identity:list_trusts": "",
- "identity:list_roles_for_trust": "",
- "identity:get_role_for_trust": "",
- "identity:delete_trust": "",
- "identity:get_trust": "",
-
- "identity:create_consumer": "rule:admin_required",
- "identity:get_consumer": "rule:admin_required",
- "identity:list_consumers": "rule:admin_required",
- "identity:delete_consumer": "rule:admin_required",
- "identity:update_consumer": "rule:admin_required",
-
- "identity:authorize_request_token": "rule:admin_required",
- "identity:list_access_token_roles": "rule:admin_required",
- "identity:get_access_token_role": "rule:admin_required",
- "identity:list_access_tokens": "rule:admin_required",
- "identity:get_access_token": "rule:admin_required",
- "identity:delete_access_token": "rule:admin_required",
-
- "identity:list_projects_for_endpoint": "rule:admin_required",
- "identity:add_endpoint_to_project": "rule:admin_required",
- "identity:check_endpoint_in_project": "rule:admin_required",
- "identity:list_endpoints_for_project": "rule:admin_required",
- "identity:remove_endpoint_from_project": "rule:admin_required",
-
- "identity:create_endpoint_group": "rule:admin_required",
- "identity:list_endpoint_groups": "rule:admin_required",
- "identity:get_endpoint_group": "rule:admin_required",
- "identity:update_endpoint_group": "rule:admin_required",
- "identity:delete_endpoint_group": "rule:admin_required",
- "identity:list_projects_associated_with_endpoint_group": "rule:admin_required",
- "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required",
- "identity:get_endpoint_group_in_project": "rule:admin_required",
- "identity:list_endpoint_groups_for_project": "rule:admin_required",
- "identity:add_endpoint_group_to_project": "rule:admin_required",
- "identity:remove_endpoint_group_from_project": "rule:admin_required",
-
- "identity:create_identity_provider": "rule:cloud_admin",
- "identity:list_identity_providers": "rule:cloud_admin",
- "identity:get_identity_provider": "rule:cloud_admin",
- "identity:update_identity_provider": "rule:cloud_admin",
- "identity:delete_identity_provider": "rule:cloud_admin",
-
- "identity:create_protocol": "rule:cloud_admin",
- "identity:update_protocol": "rule:cloud_admin",
- "identity:get_protocol": "rule:cloud_admin",
- "identity:list_protocols": "rule:cloud_admin",
- "identity:delete_protocol": "rule:cloud_admin",
-
- "identity:create_mapping": "rule:cloud_admin",
- "identity:get_mapping": "rule:cloud_admin",
- "identity:list_mappings": "rule:cloud_admin",
- "identity:delete_mapping": "rule:cloud_admin",
- "identity:update_mapping": "rule:cloud_admin",
-
- "identity:create_service_provider": "rule:cloud_admin",
- "identity:list_service_providers": "rule:cloud_admin",
- "identity:get_service_provider": "rule:cloud_admin",
- "identity:update_service_provider": "rule:cloud_admin",
- "identity:delete_service_provider": "rule:cloud_admin",
-
- "identity:get_auth_catalog": "",
- "identity:get_auth_projects": "",
- "identity:get_auth_domains": "",
- "identity:get_auth_system": "",
-
- "identity:list_projects_for_user": "",
- "identity:list_domains_for_user": "",
-
- "identity:list_revoke_events": "rule:service_or_admin",
-
- "identity:create_policy_association_for_endpoint": "rule:cloud_admin",
- "identity:check_policy_association_for_endpoint": "rule:cloud_admin",
- "identity:delete_policy_association_for_endpoint": "rule:cloud_admin",
- "identity:create_policy_association_for_service": "rule:cloud_admin",
- "identity:check_policy_association_for_service": "rule:cloud_admin",
- "identity:delete_policy_association_for_service": "rule:cloud_admin",
- "identity:create_policy_association_for_region_and_service": "rule:cloud_admin",
- "identity:check_policy_association_for_region_and_service": "rule:cloud_admin",
- "identity:delete_policy_association_for_region_and_service": "rule:cloud_admin",
- "identity:get_policy_for_endpoint": "rule:cloud_admin",
- "identity:list_endpoints_for_policy": "rule:cloud_admin",
-
- "identity:create_domain_config": "rule:cloud_admin",
- "identity:get_domain_config": "rule:cloud_admin",
- "identity:get_security_compliance_domain_config": "",
- "identity:update_domain_config": "rule:cloud_admin",
- "identity:delete_domain_config": "rule:cloud_admin",
- "identity:get_domain_config_default": "rule:cloud_admin",
-
- "identity:get_application_credential": "rule:admin_or_owner",
- "identity:list_application_credentials": "rule:admin_or_owner",
- "identity:create_application_credential": "rule:admin_or_owner",
- "identity:delete_application_credential": "rule:admin_or_owner"
-}
diff --git a/install/devsds/lib/keystone.sh b/install/devsds/lib/keystone.sh
deleted file mode 100755
index d96ca22b0..000000000
--- a/install/devsds/lib/keystone.sh
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2018 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-_XTRACE_KEYSTONE=$(set +o | grep xtrace)
-set +o xtrace
-
-# 'stack' user is just for install keystone through devstack
-osds::keystone::create_user(){
- if id ${STACK_USER_NAME} &> /dev/null; then
- return
- fi
- sudo useradd -s /bin/bash -d ${STACK_HOME} -m ${STACK_USER_NAME}
- echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
-}
-
-
-osds::keystone::remove_user(){
- userdel ${STACK_USER_NAME} -f -r
- rm /etc/sudoers.d/stack
-}
-
-osds::keystone::devstack_local_conf(){
-DEV_STACK_LOCAL_CONF=${DEV_STACK_DIR}/local.conf
-cat > $DEV_STACK_LOCAL_CONF << DEV_STACK_LOCAL_CONF_DOCK
-[[local|localrc]]
-# use TryStack git mirror
-GIT_BASE=$STACK_GIT_BASE
-
-# If the ``*_PASSWORD`` variables are not set here you will be prompted to enter
-# values for them by ``stack.sh``and they will be added to ``local.conf``.
-ADMIN_PASSWORD=$STACK_PASSWORD
-DATABASE_PASSWORD=$STACK_PASSWORD
-RABBIT_PASSWORD=$STACK_PASSWORD
-SERVICE_PASSWORD=$STACK_PASSWORD
-
-# Neither is set by default.
-HOST_IP=$HOST_IP
-
-# path of the destination log file. A timestamp will be appended to the given name.
-LOGFILE=\$DEST/logs/stack.sh.log
-
-# Old log files are automatically removed after 7 days to keep things neat. Change
-# the number of days by setting ``LOGDAYS``.
-LOGDAYS=2
-
-ENABLED_SERVICES=mysql,key
-# Using stable/queens branches
-# ---------------------------------
-KEYSTONE_BRANCH=$STACK_BRANCH
-KEYSTONECLIENT_BRANCH=$STACK_BRANCH
-DEV_STACK_LOCAL_CONF_DOCK
-chown stack:stack $DEV_STACK_LOCAL_CONF
-}
-
-osds::keystone::opensds_conf() {
-cat >> $OPENSDS_CONFIG_DIR/opensds.conf << OPENSDS_GLOBAL_CONFIG_DOC
-[keystone_authtoken]
-memcached_servers = $KEYSTONE_IP:11211
-signing_dir = /var/cache/opensds
-cafile = /opt/stack/data/ca-bundle.pem
-auth_uri = http://$KEYSTONE_IP/identity
-project_domain_name = Default
-project_name = service
-user_domain_name = Default
-password = $STACK_PASSWORD
-# Whether to encrypt the password. If enabled, the value of the password must be ciphertext.
-enable_encrypted = False
-# Encryption and decryption tool. Default value is aes. The decryption tool can only decrypt the corresponding ciphertext.
-pwd_encrypter = aes
-username = $OPENSDS_SERVER_NAME
-auth_url = http://$KEYSTONE_IP/identity
-auth_type = password
-
-OPENSDS_GLOBAL_CONFIG_DOC
-
-cp $OPENSDS_DIR/examples/policy.json $OPENSDS_CONFIG_DIR
-}
-
-osds::keystone::create_user_and_endpoint(){
- . $DEV_STACK_DIR/openrc admin admin
- openstack user create --domain default --password $STACK_PASSWORD $OPENSDS_SERVER_NAME
- openstack role add --project service --user opensds admin
- openstack group create service
- openstack group add user service opensds
- openstack role add service --project service --group service
- openstack group add user admins admin
- openstack service create --name opensds$OPENSDS_VERSION --description "OpenSDS Block Storage" opensds$OPENSDS_VERSION
- openstack endpoint create --region RegionOne opensds$OPENSDS_VERSION public http://$HOST_IP:50040/$OPENSDS_VERSION/%\(tenant_id\)s
- openstack endpoint create --region RegionOne opensds$OPENSDS_VERSION internal http://$HOST_IP:50040/$OPENSDS_VERSION/%\(tenant_id\)s
- openstack endpoint create --region RegionOne opensds$OPENSDS_VERSION admin http://$HOST_IP:50040/$OPENSDS_VERSION/%\(tenant_id\)s
-}
-
-osds::keystone::delete_user(){
- . $DEV_STACK_DIR/openrc admin admin
- openstack service delete opensds$OPENSDS_VERSION
- openstack role remove service --project service --group service
- openstack group remove user service opensds
- openstack group delete service
- openstack user delete $OPENSDS_SERVER_NAME --domain default
-}
-
-osds::keystone::delete_redundancy_data() {
- . $DEV_STACK_DIR/openrc admin admin
- openstack project delete demo
- openstack project delete alt_demo
- openstack project delete invisible_to_admin
- openstack user delete demo
- openstack user delete alt_demo
-}
-
-osds::keystone::download_code(){
- if [ ! -d ${DEV_STACK_DIR} ];then
- git clone ${STACK_GIT_BASE}/openstack-dev/devstack.git -b ${STACK_BRANCH} ${DEV_STACK_DIR}
- chown stack:stack -R ${DEV_STACK_DIR}
- fi
-
-}
-
-osds::keystone::install(){
- if [ "true" == $USE_CONTAINER_KEYSTONE ]
- then
- KEYSTONE_IP=$HOST_IP
- docker pull opensdsio/opensds-authchecker:latest
- docker run -d --privileged=true --net=host --name=opensds-authchecker opensdsio/opensds-authchecker:latest
- osds::keystone::opensds_conf
- docker cp $TOP_DIR/lib/keystone.policy.json opensds-authchecker:/etc/keystone/policy.json
- else
- if [ "true" != $USE_EXISTING_KEYSTONE ]
- then
- KEYSTONE_IP=$HOST_IP
- osds::keystone::create_user
- osds::keystone::download_code
- osds::keystone::opensds_conf
-
- # If keystone is ready to start, there is no need continue next step.
- if osds::util::wait_for_url http://$HOST_IP/identity "keystone" 0.25 4; then
- return
- fi
- osds::keystone::devstack_local_conf
- cd ${DEV_STACK_DIR}
- su $STACK_USER_NAME -c ${DEV_STACK_DIR}/stack.sh
- osds::keystone::create_user_and_endpoint
- osds::keystone::delete_redundancy_data
- # add opensds customize policy.json for keystone
- cp $TOP_DIR/lib/keystone.policy.json /etc/keystone/policy.json
- else
- osds::keystone::opensds_conf
- cd ${DEV_STACK_DIR}
- osds::keystone::create_user_and_endpoint
- fi
- fi
-}
-
-osds::keystone::cleanup() {
- : #do nothing
-}
-
-osds::keystone::uninstall(){
- if [ "true" == $USE_CONTAINER_KEYSTONE ]
- then
- docker stop opensds-authchecker
- docker rm opensds-authchecker
- else
- if [ "true" != $USE_EXISTING_KEYSTONE ]
- then
- su $STACK_USER_NAME -c ${DEV_STACK_DIR}/unstack.sh
- else
- osds::keystone::delete_user
- fi
- fi
-}
-
-osds::keystone::uninstall_purge(){
- rm $STACK_HOME/* -rf
- osds::keystone::remove_user
-}
-
-## Restore xtrace
-$_XTRACE_KEYSTONE
diff --git a/install/devsds/lib/lvm.sh b/install/devsds/lib/lvm.sh
deleted file mode 100755
index cde31114a..000000000
--- a/install/devsds/lib/lvm.sh
+++ /dev/null
@@ -1,430 +0,0 @@
-#!/bin/bash
-
-# Copyright 2017 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Save trace setting
-_XTRACE_LVM=$(set +o | grep xtrace)
-set +o xtrace
-
-# Defaults
-# --------
-# Name of the lvm volume groups to use/create for iscsi volumes
-VOLUME_GROUP_NAME=${VOLUME_GROUP_NAME:-opensds-volumes}
-FILE_GROUP_NAME=${FILE_GROUP_NAME:-opensds-files}
-FILE_VOLUME_GROUP_NAME=${VOLUME_GROUP_NAME:-opensds-files}
-DEFAULT_VOLUME_GROUP_NAME=$VOLUME_GROUP_NAME-default
-FILE_DEFAULT_VOLUME_GROUP_NAME=$FILE_GROUP_NAME-default
-
-# Name of lvm nvme volume group to use/create for nvme volumes
-NVME_VOLUME_GROUP_NAME=$VOLUME_GROUP_NAME-nvme
-# Backing file name is of the form $VOLUME_GROUP$BACKING_FILE_SUFFIX
-BACKING_FILE_SUFFIX=-backing-file
-# Default volume size
-VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-20G}
-LVM_DIR=$OPT_DIR/lvm
-DATA_DIR=$LVM_DIR
-mkdir -p $LVM_DIR
-
-FILE_LVM_DIR=$OPT_DIR/nfs
-FILE_DATA_DIR=$FILE_LVM_DIR
-mkdir -p $FILE_LVM_DIR
-
-
-# nvme dir
-NVME_DIR=/opt/opensdsNvme
-# nvme device
-LVM_DEVICE=/dev/nvme0n1
-
-osds::lvm::pkg_install(){
- sudo apt-get install -y lvm2 tgt open-iscsi ibverbs-utils
-}
-
-osds::nfs::pkg_install(){
- sudo apt-get install -y nfs-kernel-server
-}
-
-osds::lvm::pkg_uninstall(){
- sudo apt-get purge -y lvm2 tgt open-iscsi ibvverbs-utils
-}
-
-osds::lvm::nvmeofpkginstall(){
- # nvme-cli utility for nvmeof initiator
- wget https://github.com/linux-nvme/nvme-cli/archive/v1.8.1.tar.gz -O /opt/nvmecli-1.8.1.tar.gz
- sudo tar -zxvf /opt/nvmecli-1.8.1.tar.gz -C /opt/
- cd /opt/nvme-cli-1.8.1 && sudo make && sudo make install
- # nvme kernel
- sudo modprobe nvmet
- sudo modprobe nvme-tcp
- sudo modprobe nvmet-tcp
- sudo modprobe nvme-rdma
- sudo modprobe nvmet-rdma
- sudo modprobe nvme-fc
- sudo modprobe nvmet-fc
-}
-
-osds::lvm::nvmeofpkguninstall(){
- sudo nvme disconnect-all
- sudo modprobe -r nvme-rdma
- sudo modprobe -r nvmet-rdma
- sudo modprobe -r nvme-tcp
- sudo modprobe -r nvmet-tcp
- sudo modprobe -r nvme-fc
- sudo modprobe -r nvmet-fc
- sudo modprobe -r nvmet
-}
-
-osds::lvm::create_volume_group(){
- local vg=$1
- local size=$2
-
- local backing_file=$DATA_DIR/$vg$BACKING_FILE_SUFFIX
- if ! sudo vgs $vg; then
- # Only create if the file doesn't already exists
- [[ -f $backing_file ]] || truncate -s $size $backing_file
- local vg_dev
- vg_dev=`sudo losetup -f --show $backing_file`
-
- # Only create volume group if it doesn't already exist
- if ! sudo vgs $vg; then
- sudo vgcreate $vg $vg_dev
- fi
- fi
-}
-
-osds::lvm::create_volume_group_for_file(){
- local fvg="opensds-files-default"
- local size=$2
-
- local backing_file=$FILE_DATA_DIR/$fvg$BACKING_FILE_SUFFIX
- if ! sudo vgs $fvg; then
- # Only create if the file doesn't already exists
- [[ -f $backing_file ]] || truncate -s $size $backing_file
- local vg_dev
- vg_dev=`sudo losetup -f --show $backing_file`
-
- # Only create volume group if it doesn't already exist
- if ! sudo vgs $fvg; then
- sudo vgcreate $fvg $vg_dev
- fi
- fi
-}
-
-osds::lvm::create_nvme_vg(){
- local vg=$1
- local size=$2
- cap=$(parted $LVM_DEVICE unit GB print free | grep 'Free Space' | tail -n1 | awk '{print $3}')
- if [ cap > '$size' ];then
- # Only create if the file doesn't already exists
- # create volume group and prepare kernel module
- sudo mkdir -p $NVME_DIR/$vg
- sudo mount $LVM_DEVICE $NVME_DIR/$vg
- local backing_file=$NVME_DIR/$vg/$vg$BACKING_FILE_SUFFIX
- if ! sudo vgs $vg; then
- # Only create if the file doesn't already exists
- [[ -f $backing_file ]] || truncate -s $size $backing_file
- local vg_dev
- vg_dev=`sudo losetup -f --show $backing_file`
-
- # Only create physical volume if it doesn't already exist
- if ! sudo pvs $vg_dev; then
- sudo pvcreate $vg_dev
- fi
-
- # Only create volume group if it doesn't already exist
- if ! sudo vgs $vg; then
- sudo vgcreate $vg $vg_dev
- fi
- fi
- else
- echo "disk $LVM_DEVICE does not have enough space"
- fi
-}
-
-osds::lvm::set_configuration(){
-cat > $OPENSDS_DRIVER_CONFIG_DIR/lvm.yaml << OPENSDS_LVM_CONFIG_DOC
-tgtBindIp: $HOST_IP
-tgtConfDir: /etc/tgt/conf.d
-pool:
- $DEFAULT_VOLUME_GROUP_NAME:
- diskType: NL-SAS
- availabilityZone: default
- multiAttach: true
- storageType: block
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- ioConnectivity:
- accessProtocol: iscsi
- maxIOPS: 7000000
- maxBWS: 600
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 5ms
-OPENSDS_LVM_CONFIG_DOC
-
-cat >> $OPENSDS_CONFIG_DIR/opensds.conf << OPENSDS_LVM_GLOBAL_CONFIG_DOC
-[lvm]
-name = lvm
-description = LVM Test
-driver_name = lvm
-config_path = /etc/opensds/driver/lvm.yaml
-
-OPENSDS_LVM_GLOBAL_CONFIG_DOC
-}
-
-osds::lvm::set_configuration_for_file(){
-cat > $OPENSDS_DRIVER_CONFIG_DIR/nfs.yaml << OPENSDS_FILE_CONFIG_DOC
-tgtBindIp: $HOST_IP
-tgtConfDir: /etc/tgt/conf.d
-pool:
- $FILE_DEFAULT_VOLUME_GROUP_NAME:
- diskType: NL-SAS
- availabilityZone: default
- multiAttach: true
- storageType: file
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- storageAccessCapability:
- - Read
- - Write
- - Execute
- ioConnectivity:
- accessProtocol: nfs
- maxIOPS: 7000000
- maxBWS: 600
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 5ms
-OPENSDS_FILE_CONFIG_DOC
-
-cat >> $OPENSDS_CONFIG_DIR/opensds.conf << OPENSDS_FILE_GLOBAL_CONFIG_DOC
-[nfs]
-name = nfs
-description = NFS LVM TEST
-driver_name = nfs
-config_path = /etc/opensds/driver/nfs.yaml
-
-OPENSDS_FILE_GLOBAL_CONFIG_DOC
-}
-
-osds::lvm::set_nvme_configuration(){
-cat >> $OPENSDS_DRIVER_CONFIG_DIR/lvm.yaml << OPENSDS_LVM_CONFIG_DOC
-
- $NVME_VOLUME_GROUP_NAME:
- diskType: NL-SAS
- availabilityZone: default
- multiAttach: true
- storageType: block
- extras:
- dataStorage:
- provisioningPolicy: Thin
- compression: false
- deduplication: false
- ioConnectivity:
- accessProtocol: nvmeof
- maxIOPS: 7000000
- maxBWS: 600
- minIOPS: 1000000
- minBWS: 100
- latency: 100
- advanced:
- diskType: SSD
- latency: 20us
-OPENSDS_LVM_CONFIG_DOC
-}
-
-osds::lvm::remove_volumes() {
- local vg=$1
-
- # Clean out existing volumes
- sudo lvremove -f $vg
-}
-
-osds::lvm::remove_volume_group() {
- local vg=$1
-
- # Remove the volume group
- sudo vgremove -f $vg
-}
-osds::lvm::remove_volume_group_for_file() {
- local fvg="opensds-files-default"
- # Remove the volume group
- sudo vgremove -f $fvg
-}
-
-osds::lvm::clean_backing_file() {
- local backing_file=$1
- # If the backing physical device is a loop device, it was probably setup by DevStack
- if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
- local vg_dev
- vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
- if [[ -n "$vg_dev" ]]; then
- sudo losetup -d $vg_dev
- fi
- rm -f $backing_file
- fi
-}
-
-osds::lvm::clean_volume_group_for_file() {
- local fvg="opensds-files-default"
- osds::lvm::remove_volume_group_for_file $fvg
- # if there is no logical volume left, it's safe to attempt a cleanup
- # of the backing file
- if [[ -z "$(sudo lvs --noheadings -o lv_name $fvg 2>/dev/null)" ]]; then
- osds::lvm::clean_backing_file $FILE_DATA_DIR/$fvg$BACKING_FILE_SUFFIX
- fi
-}
-
-osds::lvm::clean_volume_group() {
- local vg=$1
- osds::lvm::remove_volumes $vg
- osds::lvm::remove_volume_group $vg
- osds::lvm::remove_volume_group_for_file $fvg
- # if there is no logical volume left, it's safe to attempt a cleanup
- # of the backing file
- if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then
- osds::lvm::clean_backing_file $DATA_DIR/$vg$BACKING_FILE_SUFFIX
- fi
-}
-
-osds::lvm::clean_nvme_volume_group(){
- local nvmevg=$1
- echo "nvme pool ${nvmevg}"
- osds::lvm::remove_volumes $nvmevg
- osds::lvm::remove_volume_group $nvmevg
- # if there is no logical volume left, it's safe to attempt a cleanup
- # of the backing file
- if [[ -z "$(sudo lvs --noheadings -o lv_name $nvmevg 2>/dev/null)" ]]; then
- osds::lvm::clean_backing_file $NVME_DIR/$nvmevg/$nvmevg$BACKING_FILE_SUFFIX
- fi
- ## umount nvme disk and remove corresponding dir
- for i in {1..10}
- do
- # 'umount -l' can umount even if target is busy
- sudo umount -l $NVME_DIR/$nvmevg
- if [ $? -eq 0 ]; then
- sudo rmdir $NVME_DIR/$nvmevg
- sudo rmdir $NVME_DIR
- echo "umount & removement succeed"
- return 0
- fi
- sleep 1
- done
- echo "umount failed after retry 10 times"
- echo "please check if there are any remaining attachments and umount dir ${NVME_DIRi}/${nvmevg} manually"
-}
-
-
-# osds::lvm::clean_lvm_filter() Remove the filter rule set in set_lvm_filter()
-
-osds::lvm::clean_lvm_filter() {
- sudo sed -i "s/^.*# from devsds$//" /etc/lvm/lvm.conf
-}
-
-# osds::lvm::set_lvm_filter() Gather all devices configured for LVM and
-# use them to build a global device filter
-# osds::lvm::set_lvm_filter() Create a device filter
-# and add to /etc/lvm.conf. Note this uses
-# all current PV's in use by LVM on the
-# system to build it's filter.
-osds::lvm::set_lvm_filter() {
- local filter_suffix='"r|.*|" ] # from devsds'
- local filter_string="global_filter = [ "
- local pv
- local vg
- local line
-
- for pv_info in $(sudo pvs --noheadings -o name); do
- pv=$(echo -e "${pv_info}" | sed 's/ //g' | sed 's/\/dev\///g')
- new="\"a|$pv|\", "
- filter_string=$filter_string$new
- done
- filter_string=$filter_string$filter_suffix
-
- osds::lvm::clean_lvm_filter
- sudo sed -i "/# global_filter = \[.*\]/a\ $global_filter$filter_string" /etc/lvm/lvm.conf
- osds::echo_summary "set lvm.conf device global_filter to: $filter_string"
-}
-
-
-osds::lvm::install() {
- local vg=$DEFAULT_VOLUME_GROUP_NAME
- local fvg=$FILE_DEFAULT_VOLUME_GROUP_NAME
- local size=$VOLUME_BACKING_FILE_SIZE
-
- # Install lvm relative packages.
- osds::lvm::pkg_install
- osds::nfs::pkg_install
- osds::lvm::create_volume_group_for_file $fvg $size
- osds::lvm::create_volume_group $vg $size
-
- # Remove iscsi targets
- sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete
- # Remove volumes that already exist.
- osds::lvm::remove_volumes $vg
- osds::lvm::set_configuration
- osds::lvm::set_configuration_for_file
-
- # Check nvmeof prerequisites
- local nvmevg=$NVME_VOLUME_GROUP_NAME
- if [[ -e "$LVM_DEVICE" ]]; then
- #phys_port_cnt=$(ibv_devinfo |grep -Eow hca_id |wc -l)
- #echo "The actual quantity of RDMA ports is $phys_port_cnt"
- #nvmetcpsupport=$(sudo modprobe nvmet-tcp)
- #if [[ "$phys_port_cnt" < '1' ]] && [ $nvmetcpsupport -ne 0 ] ; then
- # echo "RDMA card not found , and kernel version can not support nvme-tcp "
- #else
- osds::lvm::create_nvme_vg $nvmevg $size
- osds::lvm::nvmeofpkginstall
- # Remove volumes that already exist
- osds::lvm::remove_volumes $nvmevg
- osds::lvm::set_nvme_configuration
- #fi
- fi
- osds::lvm::set_lvm_filter
-}
-
-osds::lvm::cleanup(){
- osds::lvm::clean_volume_group $DEFAULT_VOLUME_GROUP_NAME
- osds::lvm::clean_volume_group_for_file $FILE_DEFAULT_VOLUME_GROUP_NAME
- osds::lvm::clean_lvm_filter
- local nvmevg=$NVME_VOLUME_GROUP_NAME
- if vgs $nvmevg ; then
- osds::lvm::clean_nvme_volume_group $nvmevg
- fi
-}
-
-osds::lvm::uninstall(){
- : # do nothing
-}
-
-osds::lvm::uninstall_purge(){
- echo osds::lvm::pkg_uninstall
- echo osds::lvm::nvmeofpkguninstall
-}
-
-# Restore xtrace
-$_XTRACE_LVM
diff --git a/install/devsds/lib/opensds.sh b/install/devsds/lib/opensds.sh
deleted file mode 100755
index 08b5aeae8..000000000
--- a/install/devsds/lib/opensds.sh
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# OpenSDS relative operation.
-
-_XTRACE_OPENSDS=$(set +o | grep xtrace)
-set +o xtrace
-
-
-osds:opensds:configuration(){
-
-# Copy api spec file to configuration path
-cp $OPENSDS_DIR/openapi-spec/swagger.yaml $OPENSDS_CONFIG_DIR
-
-# Set global configuration.
-cat >> $OPENSDS_CONFIG_DIR/opensds.conf << OPENSDS_GLOBAL_CONFIG_DOC
-[osdsapiserver]
-api_endpoint = 0.0.0.0:50040
-auth_strategy = $OPENSDS_AUTH_STRATEGY
-# If https is enabled, the default value of cert file
-# is /opt/opensds-security/opensds/opensds-cert.pem,
-# and key file is /opt/opensds-security/opensds/opensds-key.pem
-https_enabled = False
-beego_https_cert_file =
-beego_https_key_file =
-
-[osdslet]
-api_endpoint = $HOST_IP:50049
-
-[osdsdock]
-api_endpoint = $HOST_IP:50050
-# Specify which backends should be enabled, sample,ceph,cinder,lvm,nfs and so on.
-enabled_backends = $OPENSDS_BACKEND_LIST
-
-[database]
-endpoint = $HOST_IP:$ETCD_PORT,$HOST_IP:$ETCD_PEER_PORT
-driver = etcd
-
-OPENSDS_GLOBAL_CONFIG_DOC
-}
-
-osds::opensds::install(){
- osds:opensds:configuration
-# Run osdsdock and osdslet daemon in background.
-(
- cd ${OPENSDS_DIR}
- sudo build/out/bin/osdsapiserver --daemon
- sudo build/out/bin/osdslet --daemon
- sudo build/out/bin/osdsdock --daemon
-
- osds::echo_summary "Waiting for osdsapiserver to come up."
- osds::util::wait_for_url localhost:50040 "osdsapiserver" 0.5 80
- if [ $OPENSDS_AUTH_STRATEGY == "keystone" ]; then
- if [ "true" == $USE_CONTAINER_KEYSTONE ]
- then
- KEYSTONE_IP=$HOST_IP
- export OS_AUTH_URL=http://$KEYSTONE_IP/identity
- export OS_USERNAME=admin
- export OS_PASSWORD=opensds@123
- export OS_TENANT_NAME=admin
- export OS_PROJECT_NAME=admin
- export OS_USER_DOMAIN_ID=default
- else
- local xtrace
- xtrace=$(set +o | grep xtrace)
- set +o xtrace
- source $DEV_STACK_DIR/openrc admin admin
- $xtrace
- fi
- fi
-
- # Copy bash completion script to system.
- cp ${OPENSDS_DIR}/osdsctl/completion/osdsctl.bash_completion /etc/bash_completion.d/
-
- export OPENSDS_AUTH_STRATEGY=$OPENSDS_AUTH_STRATEGY
- export OPENSDS_ENDPOINT=http://localhost:50040
- build/out/bin/osdsctl profile create '{"name": "default_block", "description": "default policy", "storageType": "block"}'
- build/out/bin/osdsctl profile create '{"name": "default_file", "description": "default policy", "storageType": "file", "provisioningProperties":{"ioConnectivity": {"accessProtocol": "NFS"},"DataStorage":{"StorageAccessCapability":["Read","Write","Execute"]}}}'
-
- if [ $? == 0 ]; then
- osds::echo_summary devsds installed successfully !!
- fi
-)
-}
-
-osds::opensds::cleanup() {
- sudo killall -9 osdsapiserver osdslet osdsdock &>/dev/null
-}
-
-osds::opensds::uninstall(){
- : # Do nothing
-}
-
-osds::opensds::uninstall_purge(){
- : # Do nothing
-}
-
-# Restore xtrace
-$_XTRACE_OPENSDS
diff --git a/install/devsds/lib/openssl.cnf b/install/devsds/lib/openssl.cnf
deleted file mode 100755
index 88a724b49..000000000
--- a/install/devsds/lib/openssl.cnf
+++ /dev/null
@@ -1,350 +0,0 @@
-#
-# OpenSSL example configuration file.
-# This is mostly being used for generation of certificate requests.
-#
-
-# This definition stops the following lines choking if HOME isn't
-# defined.
-HOME = .
-RANDFILE = $ENV::HOME/.rnd
-
-# Extra OBJECT IDENTIFIER info:
-#oid_file = $ENV::HOME/.oid
-oid_section = new_oids
-
-# To use this configuration file with the "-extfile" option of the
-# "openssl x509" utility, name here the section containing the
-# X.509v3 extensions to use:
-# extensions =
-# (Alternatively, use a configuration file that has only
-# X.509v3 extensions in its main [= default] section.)
-
-[ new_oids ]
-
-# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.
-# Add a simple OID like this:
-# testoid1=1.2.3.4
-# Or use config file substitution like this:
-# testoid2=${testoid1}.5.6
-
-# Policies used by the TSA examples.
-tsa_policy1 = 1.2.3.4.1
-tsa_policy2 = 1.2.3.4.5.6
-tsa_policy3 = 1.2.3.4.5.7
-
-####################################################################
-[ ca ]
-default_ca = CA_default # The default ca section
-
-####################################################################
-[ CA_default ]
-
-dir = ./demoCA # Where everything is kept
-certs = $dir/certs # Where the issued certs are kept
-crl_dir = $dir/crl # Where the issued crl are kept
-database = $dir/index.txt # database index file.
-#unique_subject = no # Set to 'no' to allow creation of
- # several ctificates with same subject.
-new_certs_dir = $dir/newcerts # default place for new certs.
-
-certificate = $dir/cacert.pem # The CA certificate
-serial = $dir/serial # The current serial number
-crlnumber = $dir/crlnumber # the current crl number
- # must be commented out to leave a V1 CRL
-crl = $dir/crl.pem # The current CRL
-private_key = $dir/private/cakey.pem# The private key
-RANDFILE = $dir/private/.rand # private random number file
-
-x509_extensions = usr_cert # The extentions to add to the cert
-
-# Comment out the following two lines for the "traditional"
-# (and highly broken) format.
-name_opt = ca_default # Subject Name options
-cert_opt = ca_default # Certificate field options
-
-# Extension copying option: use with caution.
-# copy_extensions = copy
-
-# Extensions to add to a CRL. Note: Netscape communicator chokes on V2 CRLs
-# so this is commented out by default to leave a V1 CRL.
-# crlnumber must also be commented out to leave a V1 CRL.
-# crl_extensions = crl_ext
-
-default_days = 365 # how long to certify for
-default_crl_days= 30 # how long before next CRL
-default_md = default # use public key default MD
-preserve = no # keep passed DN ordering
-
-# A few difference way of specifying how similar the request should look
-# For type CA, the listed attributes must be the same, and the optional
-# and supplied fields are just that :-)
-policy = policy_match
-
-# For the CA policy
-[ policy_match ]
-countryName = optional
-stateOrProvinceName = optional
-organizationName = optional
-organizationalUnitName = optional
-commonName = supplied
-emailAddress = optional
-
-# For the 'anything' policy
-# At this point in time, you must list all acceptable 'object'
-# types.
-[ policy_anything ]
-countryName = optional
-stateOrProvinceName = optional
-localityName = optional
-organizationName = optional
-organizationalUnitName = optional
-commonName = supplied
-emailAddress = optional
-
-####################################################################
-[ req ]
-default_bits = 2048
-default_keyfile = privkey.pem
-distinguished_name = req_distinguished_name
-attributes = req_attributes
-x509_extensions = v3_ca # The extentions to add to the self signed cert
-
-# Passwords for private keys if not present they will be prompted for
-# input_password = secret
-# output_password = secret
-
-# This sets a mask for permitted string types. There are several options.
-# default: PrintableString, T61String, BMPString.
-# pkix : PrintableString, BMPString (PKIX recommendation before 2004)
-# utf8only: only UTF8Strings (PKIX recommendation after 2004).
-# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).
-# MASK:XXXX a literal mask value.
-# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.
-string_mask = utf8only
-
-# req_extensions = v3_req # The extensions to add to a certificate request
-
-[ req_distinguished_name ]
-countryName = Country Name (2 letter code)
-countryName_default = AU
-countryName_min = 2
-countryName_max = 2
-
-stateOrProvinceName = State or Province Name (full name)
-stateOrProvinceName_default = Some-State
-
-localityName = Locality Name (eg, city)
-
-0.organizationName = Organization Name (eg, company)
-0.organizationName_default = Internet Widgits Pty Ltd
-
-# we can do this but it is not needed normally :-)
-#1.organizationName = Second Organization Name (eg, company)
-#1.organizationName_default = World Wide Web Pty Ltd
-
-organizationalUnitName = Organizational Unit Name (eg, section)
-#organizationalUnitName_default =
-
-commonName = Common Name (e.g. server FQDN or YOUR name)
-commonName_max = 64
-
-emailAddress = Email Address
-emailAddress_max = 64
-
-# SET-ex3 = SET extension number 3
-
-[ req_attributes ]
-challengePassword = A challenge password
-challengePassword_min = 4
-challengePassword_max = 20
-
-unstructuredName = An optional company name
-
-[ usr_cert ]
-
-# These extensions are added when 'ca' signs a request.
-
-# This goes against PKIX guidelines but some CAs do it and some software
-# requires this to avoid interpreting an end user certificate as a CA.
-
-basicConstraints=CA:FALSE
-
-# Here are some examples of the usage of nsCertType. If it is omitted
-# the certificate can be used for anything *except* object signing.
-
-# This is OK for an SSL server.
-# nsCertType = server
-
-# For an object signing certificate this would be used.
-# nsCertType = objsign
-
-# For normal client use this is typical
-# nsCertType = client, email
-
-# and for everything including object signing:
-# nsCertType = client, email, objsign
-
-# This is typical in keyUsage for a client certificate.
-# keyUsage = nonRepudiation, digitalSignature, keyEncipherment
-
-# This will be displayed in Netscape's comment listbox.
-nsComment = "OpenSSL Generated Certificate"
-
-# PKIX recommendations harmless if included in all certificates.
-subjectKeyIdentifier=hash
-authorityKeyIdentifier=keyid,issuer
-
-# This stuff is for subjectAltName and issuerAltname.
-# Import the email address.
-# subjectAltName=email:copy
-# An alternative to produce certificates that aren't
-# deprecated according to PKIX.
-# subjectAltName=email:move
-
-# Copy subject details
-# issuerAltName=issuer:copy
-
-#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem
-#nsBaseUrl
-#nsRevocationUrl
-#nsRenewalUrl
-#nsCaPolicyUrl
-#nsSslServerName
-
-# This is required for TSA certificates.
-# extendedKeyUsage = critical,timeStamping
-
-[ v3_req ]
-
-# Extensions to add to a certificate request
-
-basicConstraints = CA:FALSE
-keyUsage = nonRepudiation, digitalSignature, keyEncipherment
-
-[ v3_ca ]
-
-
-# Extensions for a typical CA
-
-
-# PKIX recommendation.
-
-subjectKeyIdentifier=hash
-
-authorityKeyIdentifier=keyid:always,issuer
-
-# This is what PKIX recommends but some broken software chokes on critical
-# extensions.
-#basicConstraints = critical,CA:true
-# So we do this instead.
-basicConstraints = CA:true
-
-# Key usage: this is typical for a CA certificate. However since it will
-# prevent it being used as an test self-signed certificate it is best
-# left out by default.
-# keyUsage = cRLSign, keyCertSign
-
-# Some might want this also
-# nsCertType = sslCA, emailCA
-
-# Include email address in subject alt name: another PKIX recommendation
-# subjectAltName=email:copy
-# Copy issuer details
-# issuerAltName=issuer:copy
-
-# DER hex encoding of an extension: beware experts only!
-# obj=DER:02:03
-# Where 'obj' is a standard or added object
-# You can even override a supported extension:
-# basicConstraints= critical, DER:30:03:01:01:FF
-
-[ crl_ext ]
-
-# CRL extensions.
-# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.
-
-# issuerAltName=issuer:copy
-authorityKeyIdentifier=keyid:always
-
-[ proxy_cert_ext ]
-# These extensions should be added when creating a proxy certificate
-
-# This goes against PKIX guidelines but some CAs do it and some software
-# requires this to avoid interpreting an end user certificate as a CA.
-
-basicConstraints=CA:FALSE
-
-# Here are some examples of the usage of nsCertType. If it is omitted
-# the certificate can be used for anything *except* object signing.
-
-# This is OK for an SSL server.
-# nsCertType = server
-
-# For an object signing certificate this would be used.
-# nsCertType = objsign
-
-# For normal client use this is typical
-# nsCertType = client, email
-
-# and for everything including object signing:
-# nsCertType = client, email, objsign
-
-# This is typical in keyUsage for a client certificate.
-# keyUsage = nonRepudiation, digitalSignature, keyEncipherment
-
-# This will be displayed in Netscape's comment listbox.
-nsComment = "OpenSSL Generated Certificate"
-
-# PKIX recommendations harmless if included in all certificates.
-subjectKeyIdentifier=hash
-authorityKeyIdentifier=keyid,issuer
-
-# This stuff is for subjectAltName and issuerAltname.
-# Import the email address.
-# subjectAltName=email:copy
-# An alternative to produce certificates that aren't
-# deprecated according to PKIX.
-# subjectAltName=email:move
-
-# Copy subject details
-# issuerAltName=issuer:copy
-
-#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem
-#nsBaseUrl
-#nsRevocationUrl
-#nsRenewalUrl
-#nsCaPolicyUrl
-#nsSslServerName
-
-# This really needs to be in place for it to be a proxy certificate.
-proxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo
-
-####################################################################
-[ tsa ]
-
-default_tsa = tsa_config1 # the default TSA section
-
-[ tsa_config1 ]
-
-# These are used by the TSA reply generation only.
-dir = ./demoCA # TSA root directory
-serial = $dir/tsaserial # The current serial number (mandatory)
-crypto_device = builtin # OpenSSL engine to use for signing
-signer_cert = $dir/tsacert.pem # The TSA signing certificate
- # (optional)
-certs = $dir/cacert.pem # Certificate chain to include in reply
- # (optional)
-signer_key = $dir/private/tsakey.pem # The TSA private key (optional)
-
-default_policy = tsa_policy1 # Policy if request did not specify it
- # (optional)
-other_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional)
-digests = md5, sha1 # Acceptable message digests (mandatory)
-accuracy = secs:1, millisecs:500, microsecs:100 # (optional)
-clock_precision_digits = 0 # number of digits after dot. (optional)
-ordering = yes # Is ordering defined for timestamps?
- # (optional, default: no)
-tsa_name = yes # Must the TSA name be included in the reply?
- # (optional, default: no)
-ess_cert_id_chain = no # Must the ESS cert id chain be included?
- # (optional, default: no)
diff --git a/install/devsds/lib/util.sh b/install/devsds/lib/util.sh
deleted file mode 100755
index 994206655..000000000
--- a/install/devsds/lib/util.sh
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-
-# Copyright 2017 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-osds::util::sortable_date() {
- date "+%Y%m%d-%H%M%S"
-}
-
-osds::util::wait_for_url() {
- local url=$1
- local prefix=${2:-}
- local wait=${3:-1}
- local times=${4:-30}
-
- which curl >/dev/null || {
- osds::echo_summary "curl must be installed"
- exit 1
- }
-
- local i
- for i in $(seq 1 $times); do
- local out
- if out=$(curl --max-time 1 -gkfs $url 2>/dev/null); then
- osds::echo_summary "On try ${i}, ${prefix}: ${out}"
- return 0
- fi
- sleep ${wait}
- done
- osds::echo_summary "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each"
- return 1
-}
-
-# returns a random port
-osds::util::get_random_port() {
- awk -v min=1024 -v max=65535 'BEGIN{srand(); print int(min+rand()*(max-min+1))}'
-}
-
-# use netcat to check if the host($1):port($2) is free (return 0 means free, 1 means used)
-osds::util::test_host_port_free() {
- local host=$1
- local port=$2
- local success=0
- local fail=1
-
- which nc >/dev/null || {
- osds::echo_summary "netcat isn't installed, can't verify if ${host}:${port} is free, skipping the check..."
- return ${success}
- }
-
- if [ ! $(nc -vz "${host}" "${port}") ]; then
- echo "${host}:${port} is free, proceeding..."
- return ${success}
- else
- echo "${host}:${port} is already used"
- return ${fail}
- fi
-}
-
-osds::util::download_file() {
- local -r url=$1
- local -r destination_file=$2
-
- rm ${destination_file} 2&> /dev/null || true
-
- for i in $(seq 5)
- do
- if ! curl -fsSL --retry 3 --keepalive-time 2 ${url} -o ${destination_file}; then
- echo "Downloading ${url} failed. $((5-i)) retries left."
- sleep 1
- else
- echo "Downloading ${url} succeed"
- return 0
- fi
- done
- return 1
-}
-
-# Prints line number and "message" in warning format
-# warn $LINENO "message"
-osds::util::warn() {
- local exitcode=$?
- local xtrace
- xtrace=$(set +o | grep xtrace)
- set +o xtrace
- local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
- echo $msg
- $xtrace
- return $exitcode
-}
-
-# Prints line number and "message" in error format
-# err $LINENO "message"
-osds::util::err() {
- local exitcode=$?
- local xtrace
- xtrace=$(set +o | grep xtrace)
- set +o xtrace
- local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
- echo $msg 1>&2;
- $xtrace
- return $exitcode
-}
-
-# Prints line number and "message" then exits
-# die $LINENO "message"
-osds::util::die() {
- local exitcode=$?
- set +o xtrace
- local line=$1; shift
- if [ $exitcode == 0 ]; then
- exitcode=1
- fi
- osds::util::err $line "$*"
- # Give buffers a second to flush
- sleep 1
- exit $exitcode
-}
-
-osds::util::get_default_host_ip() {
- local host_ip=$1
- local af=$2
- # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
- if [ -z "$host_ip" ]; then
- host_ip=""
- # Find the interface used for the default route
- host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)}
- local host_ips
- host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}')
- local ip
- for ip in $host_ips; do
- host_ip=$ip
- break;
- done
- fi
- echo $host_ip
-}
-
-osds::util::is_service_enabled() {
- local enabled=1
- local services=$@
- local service
- for service in ${services}; do
- [[ ,${OPENSDS_ENABLED_SERVICES}, =~ ,${service}, ]] && enabled=0
- done
- return $enabled
-}
-
-osds::util::serice_operation() {
- local action=$1
- for service in $(echo $SUPPORT_SERVICES|tr ',' ' '); do
- if osds::util::is_service_enabled $service; then
- source $TOP_DIR/lib/$service.sh
- osds::$service::$action
- fi
- done
-}
diff --git a/install/devsds/local.conf b/install/devsds/local.conf
deleted file mode 100755
index 10bb844cb..000000000
--- a/install/devsds/local.conf
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Host Ip which is used to service ip binding, including osdslet, osdsdock, etcd, keystone etc.
-# If HOST_IP is not set, the script will use the ip of default gateway interface as the host ip.
-# HOST_IP=192.168.56.100
-
-# OpenSDS storage backend list, separated by a comma, support lvm and nfs right now.
-OPENSDS_BACKEND_LIST=lvm,nfs
-
-# OpenSDS authentication strategy, can support keystone, noauth.
-OPENSDS_AUTH_STRATEGY=noauth
-
-# If opensds will use an existing keystone, USE_EXISTING_KEYSTONE must be set to true
-# and KEYSTONE_IP must be set
-USE_EXISTING_KEYSTONE=false
-# KEYSTONE_IP=10.10.3.101
-
-# If opensds will use a keystone installed in a container, USE_CONTAINER_KEYSTONE=true
-USE_CONTAINER_KEYSTONE=true
diff --git a/install/devsds/sdsrc b/install/devsds/sdsrc
deleted file mode 100755
index 36542c80e..000000000
--- a/install/devsds/sdsrc
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2018 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-source $TOP_DIR/local.conf
-
-# Global
-HOST_IP=${HOST_IP:-}
-HOST_IP=$(osds::util::get_default_host_ip "$HOST_IP" "inet")
-if [ "$HOST_IP" == "" ]; then
- osds::util::die $LINENO "Could not determine host ip address. See local.conf for suggestions on setting HOST_IP."
-fi
-
-# OpenSDS configuration.
-OPENSDS_VERSION=${OPENSDS_VERSION:-v1beta}
-#openstack authentication strategy, support 'noauth', 'keystone'.
-OPENSDS_AUTH_STRATEGY=${OPENSDS_AUTH_STRATEGY:-noauth}
-# OpenSDS service name in keystone.
-OPENSDS_SERVER_NAME=${OPENSDS_SERVER_NAME:-opensds}
-# OpenSDS backend list.
-OPENSDS_BACKEND_LIST=${OPENSDS_BACKEND_LIST:-lvm}
-
-# devstack keystone configuration
-STACK_GIT_BASE=${STACK_GIT_BASE:-https://git.openstack.org}
-STACK_USER_NAME=${STACK_USER_NAME:-stack}
-STACK_PASSWORD=${STACK_PASSWORD:-opensds@123}
-STACK_HOME=${STACK_HOME:-/opt/stack}
-STACK_BRANCH=${STACK_BRANCH:-stable/queens}
-DEV_STACK_DIR=$STACK_HOME/devstack
-
-# ETCD configuration
-ETCD_VERSION=${ETCD_VERSION:-3.3.10}
-ETCD_HOST=${ETCD_HOST:-$HOST_IP}
-ETCD_PORT=${ETCD_PORT:-62379}
-ETCD_PEER_PORT=${ETCD_PEER_PORT:-62380}
-ETCD_DIR=${OPT_DIR}/etcd
-ETCD_LOGFILE=${ETCD_DIR}/etcd.log
-ETCD_DATADIR=${ETCD_DIR}/data
-
-OPENSDS_ENABLED_SERVICES=opensds,etcd,certificate
-if [ $OPENSDS_AUTH_STRATEGY = keystone ];then
- OPENSDS_ENABLED_SERVICES+=,keystone
-fi
-OPENSDS_ENABLED_SERVICES+=,$OPENSDS_BACKEND_LIST
-SUPPORT_SERVICES=certificate,keystone,lvm,ceph,etcd,opensds
diff --git a/install/devsds/tools/outfilter.py b/install/devsds/tools/outfilter.py
deleted file mode 100755
index 5624aca85..000000000
--- a/install/devsds/tools/outfilter.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is an output filter to filter and timestamp the logs from Grenade and
-# DevStack. Largely our awk filters got beyond the complexity level which were
-# sustainable, so this provides us much more control in a single place.
-#
-# The overhead of running python should be less than execing `date` a million
-# times during a run.
-
-import argparse
-import datetime
-import re
-import sys
-
-IGNORE_LINES = re.compile('(set \+o|xtrace)')
-HAS_DATE = re.compile('^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|')
-
-
-def get_options():
- parser = argparse.ArgumentParser(
- description='Filter output by DevStack and friends')
- parser.add_argument('-o', '--outfile',
- help='Output file for content',
- default=None)
- # NOTE(ianw): This is intended for the case where your stdout is
- # being captured by something like ansible which independently
- # logs timestamps on the lines it receives. Note that if using a
- # output file, those log lines are still timestamped.
- parser.add_argument('-b', '--no-timestamp', action='store_true',
- help='Do not prefix stdout with timestamp (bare)',
- default=False)
- parser.add_argument('-v', '--verbose', action='store_true',
- default=False)
- return parser.parse_args()
-
-
-def skip_line(line):
- """Should we skip this line."""
- return IGNORE_LINES.search(line) is not None
-
-
-def main():
- opts = get_options()
- outfile = None
- if opts.outfile:
- # note, binary mode so we can do unbuffered output.
- outfile = open(opts.outfile, 'ab', 0)
-
- # Otherwise fileinput reprocess args as files
- sys.argv = []
-
- for line in iter(sys.stdin.readline, ''):
- # put skip lines here
- if skip_line(line):
- continue
-
- # This prevents us from nesting date lines, because we'd like
- # to pull this in directly in Grenade and not double up on
- # DevStack lines.
- # NOTE(ianw): we could actually strip the extra ts in "bare"
- # mode (which came after this)? ... as we get more experience
- # with zuulv3 native jobs and ansible capture it may become
- # clearer what to do
- if HAS_DATE.search(line) is None:
- now = datetime.datetime.utcnow()
- ts_line = ("%s | %s" % (
- now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3],
- line))
- else:
- ts_line = line
-
- if opts.verbose:
- sys.stdout.write(line if opts.no_timestamp else ts_line)
- sys.stdout.flush()
-
- if outfile:
- # We've opened outfile as a binary file to get the
- # non-buffered behaviour. on python3, sys.stdin was
- # opened with the system encoding and made the line into
- # utf-8, so write the logfile out in utf-8 bytes.
- if sys.version_info < (3,):
- outfile.write(ts_line)
- else:
- outfile.write(ts_line.encode('utf-8'))
- outfile.flush()
-
-
-if __name__ == '__main__':
- try:
- sys.exit(main())
- except KeyboardInterrupt:
- sys.exit(1)
diff --git a/install/devsds/uninstall.sh b/install/devsds/uninstall.sh
deleted file mode 100755
index 992d94731..000000000
--- a/install/devsds/uninstall.sh
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash
-
-# Copyright 2017 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-osds:usage()
-{
- echo "Usage: $(basename $0) [--help|--cleanup|--purge]"
-cat << OSDS_HELP_UNINSTALL_INFO_DOC
-Usage:
- $(basename $0) [-h|--help]
- $(basename $0) [-c|--cleanup]
- $(basename $0) [-p|--purge]
-Flags:
- -h, --help Print this information.
- -c, --cleanup Stop service and clean up some application data.
- -p, --purge Remove package, config file, log file.
-OSDS_HELP_UNINSTALL_INFO_DOC
-}
-
-# Parse parameter first
-case "$# $*" in
- "0 "|"1 --purge"|"1 -p"|"1 --cleanup"|"1 -c")
- ;;
- "1 -h"|"1 --help")
- osds:usage
- exit 0
- ;;
- *)
- osds:usage
- exit 1
- ;;
-esac
-
-set -o xtrace
-
-# Keep track of the script directory
-TOP_DIR=$(cd $(dirname "$0") && pwd)
-# Temporary dir for testing
-OPT_DIR=/opt/opensds
-OPT_BIN=$OPT_DIR/bin
-
-source $TOP_DIR/lib/util.sh
-source $TOP_DIR/sdsrc
-
-osds::cleanup() {
- osds::util::serice_operation cleanup
-}
-
-osds::uninstall(){
- osds::cleanup
- osds::util::serice_operation uninstall
-}
-
-osds::uninstall_purge(){
- osds::uninstall
- osds::util::serice_operation uninstall_purge
-
- rm /opt/opensds -rf
- rm /etc/opensds -rf
- rm /var/log/opensds -rf
- rm /etc/bash_completion.d/osdsctl.bash_completion -rf
- rm /opt/opensds-security -rf
-}
-
-case "$# $*" in
- "1 -c"|"1 --cleanup")
- osds::cleanup
- ;;
- "0 ")
- osds::uninstall
- ;;
- "1 -p"|"1 --purge")
- osds::uninstall_purge
- ;;
- *)
- osds:usage
- exit 1
- ;;
-esac
diff --git a/install/kubernetes/README.md b/install/kubernetes/README.md
deleted file mode 100644
index d0cf64f53..000000000
--- a/install/kubernetes/README.md
+++ /dev/null
@@ -1,193 +0,0 @@
-# Install OpenSDS on an existing Kubernetes cluster
-
-This tutorial assumes that you already have an existing Kubernetes cluster with
-kube-dns service enabled. If there is some DNS problems with your Kubernetes
-cluster, please refer to [here](https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/)
-for debugging resolution.
-
-## Prepare
-Before you start, please make sure you have all stuffs below ready:
-- Kubernetes cluster (suggest v1.13.x or later).
-- More than 30GB remaining disk.
-- Make sure have access to the Internet.
-
-## Step by Step Installation
-### Configuration
-Firstly, you need to configure some global files with command below:
-```shell
-export BackendType="sample" # 'sample' is the default option, currently also support 'lvm'
-
-mkdir -p /etc/opensds && sudo cat > /etc/opensds/opensds.conf < /etc/opensds/driver/lvm.yaml < 50040/TCP 21m
-authchecker ClusterIP 10.0.0.151 80/TCP 21m
-controller ClusterIP 10.0.0.217 50049/TCP 21m
-dashboard NodePort 10.0.0.253 8088:31975/TCP 21m
-db ClusterIP 10.0.0.225 2379/TCP,2380/TCP 21m
-dock ClusterIP 10.0.0.144 50050/TCP 21m
-```
-
-## Test work
-
-### OpenSDS CLI tool
-#### Download cli tool.
-```
-wget https://github.com/opensds/opensds/releases/download/v0.5.2/opensds-hotpot-v0.5.2-linux-amd64.tar.gz
-tar zxvf opensds-hotpot-v0.5.2-linux-amd64.tar.gz
-cp opensds-hotpot-v0.5.2-linux-amd64/bin/* /usr/local/bin
-chmod 755 /usr/local/bin/osdsctl
-
-export OPENSDS_ENDPOINT=http://{{ apiserver_cluster_ip }}:50040
-export OPENSDS_AUTH_STRATEGY=keystone
-export OS_AUTH_URL=http://{{ authchecker_cluster_ip }}/identity
-export OS_USERNAME=admin
-export OS_PASSWORD=opensds@123
-export OS_TENANT_NAME=admin
-export OS_PROJECT_NAME=admin
-export OS_USER_DOMAIN_ID=default
-
-osdsctl pool list
-```
-
-#### Create a default profile firstly.
-```
-osdsctl profile create '{"name": "default", "description": "default policy", "storageType": "block"}'
-```
-
-#### Create a volume.
-```
-osdsctl volume create 1 --name=test-001
-```
-
-#### List all volumes.
-```
-osdsctl volume list
-```
-
-#### Delete the volume.
-```
-osdsctl volume delete
-```
-
-### OpenSDS UI
-OpenSDS UI dashboard is available at `http://{your_host_ip}:31975`, please login the dashboard using the default admin credentials: `admin/opensds@123`. Create `tenant`, `user`, and `profiles` as admin. Multi-Cloud service is also supported by dashboard.
-
-Logout of the dashboard as admin and login the dashboard again as a non-admin user to manage storage resource:
-
-#### Volume Service
-* Create volume
-* Create snapshot
-* Expand volume size
-* Create volume from snapshot
-* Create volume group
-
-After this is done, just enjoy it!
diff --git a/install/kubernetes/istio-networking/destination-rule-all.yaml b/install/kubernetes/istio-networking/destination-rule-all.yaml
deleted file mode 100644
index cccc7e73a..000000000
--- a/install/kubernetes/istio-networking/destination-rule-all.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: networking.istio.io/v1alpha3
-kind: DestinationRule
-metadata:
- name: apiserver
- namespace: opensds
-spec:
- host: apiserver.opensds.svc.cluster.local
- subsets:
- - name: v1beta
- labels:
- version: v1beta
----
-apiVersion: networking.istio.io/v1alpha3
-kind: DestinationRule
-metadata:
- name: controller
- namespace: opensds
-spec:
- host: controller.opensds.svc.cluster.local
- subsets:
- - name: v1beta
- labels:
- version: v1beta
----
-apiVersion: networking.istio.io/v1alpha3
-kind: DestinationRule
-metadata:
- name: dock
- namespace: opensds
-spec:
- host: dock.opensds.svc.cluster.local
- subsets:
- - name: v1beta
- labels:
- version: v1beta
----
diff --git a/install/kubernetes/istio-networking/opensds-gateway.yaml b/install/kubernetes/istio-networking/opensds-gateway.yaml
deleted file mode 100644
index ee67c2db3..000000000
--- a/install/kubernetes/istio-networking/opensds-gateway.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: networking.istio.io/v1alpha3
-kind: Gateway
-metadata:
- name: opensds-gateway
- namespace: opensds
-spec:
- selector:
- istio: ingressgateway # use istio default controller
- servers:
- - port:
- number: 80
- name: http-apiserver
- protocol: HTTP
- hosts:
- - "*"
----
-apiVersion: networking.istio.io/v1alpha3
-kind: VirtualService
-metadata:
- name: opensds
- namespace: opensds
-spec:
- hosts:
- - "*"
- gateways:
- - opensds-gateway
- http:
- - route:
- - destination:
- host: apiserver.opensds.svc.cluster.local
- port:
- number: 50040
- subset: v1beta
- weight: 100
diff --git a/install/kubernetes/istio-networking/virtual-service-all.yaml b/install/kubernetes/istio-networking/virtual-service-all.yaml
deleted file mode 100644
index 0bd39b1cc..000000000
--- a/install/kubernetes/istio-networking/virtual-service-all.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: networking.istio.io/v1alpha3
-kind: VirtualService
-metadata:
- name: apiserver
- namespace: opensds
-spec:
- hosts:
- - apiserver.opensds.svc.cluster.local
- http:
- - route:
- - destination:
- host: apiserver.opensds.svc.cluster.local
- subset: v1beta
- weight: 100
----
-apiVersion: networking.istio.io/v1alpha3
-kind: VirtualService
-metadata:
- name: controller
- namespace: opensds
-spec:
- hosts:
- - controller.opensds.svc.cluster.local
- http:
- - route:
- - destination:
- host: controller.opensds.svc.cluster.local
- subset: v1beta
- weight: 100
----
-apiVersion: networking.istio.io/v1alpha3
-kind: VirtualService
-metadata:
- name: dock
- namespace: opensds
-spec:
- hosts:
- - dock.opensds.svc.cluster.local
- http:
- - route:
- - destination:
- host: dock.opensds.svc.cluster.local
- subset: v1beta
- weight: 100
----
diff --git a/install/kubernetes/opensds-all.yaml b/install/kubernetes/opensds-all.yaml
deleted file mode 100644
index 683001887..000000000
--- a/install/kubernetes/opensds-all.yaml
+++ /dev/null
@@ -1,369 +0,0 @@
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-##################################################################################################
-# Apiserver service
-##################################################################################################
-apiVersion: v1
-kind: Service
-metadata:
- name: apiserver
- namespace: opensds
- labels:
- app: apiserver
- service: apiserver
-spec:
- ports:
- - port: 50040
- name: http-apiserver
- selector:
- app: apiserver
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: apiserver-v1beta
- namespace: opensds
- labels:
- app: apiserver
- version: v1beta
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: apiserver
- version: v1beta
- spec:
- containers:
- - name: apiserver
- image: opensdsio/opensds-apiserver:latest
- imagePullPolicy: IfNotPresent
- command: ["bin/sh"]
- args: ["-c", "/usr/bin/osdsapiserver -logtostderr"]
- ports:
- - containerPort: 50040
- volumeMounts:
- - name: opensds-conf-dir
- mountPath: /etc/opensds
- volumes:
- - name: opensds-conf-dir
- hostPath:
- path: /etc/opensds
- type: Directory
----
-##################################################################################################
-# Controller service
-##################################################################################################
-apiVersion: v1
-kind: Service
-metadata:
- name: controller
- namespace: opensds
- labels:
- app: controller
- service: controller
-spec:
- ports:
- - port: 50049
- name: tcp-controller
- selector:
- app: controller
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: controller-v1beta
- namespace: opensds
- labels:
- app: controller
- version: v1beta
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: controller
- version: v1beta
- spec:
- containers:
- - name: controller
- image: opensdsio/opensds-controller:latest
- imagePullPolicy: IfNotPresent
- command: ["bin/sh"]
- args: ["-c", "/usr/bin/osdslet -logtostderr"]
- ports:
- - containerPort: 50049
- volumeMounts:
- - name: opensds-conf-dir
- mountPath: /etc/opensds
- volumes:
- - name: opensds-conf-dir
- hostPath:
- path: /etc/opensds
- type: Directory
----
-##################################################################################################
-# Dock service
-##################################################################################################
-apiVersion: v1
-kind: Service
-metadata:
- name: dock
- namespace: opensds
- labels:
- app: dock
- service: dock
-spec:
- ports:
- - port: 50050
- name: tcp-dock
- selector:
- app: dock
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: dock-v1beta
- namespace: opensds
- labels:
- app: dock
- version: v1beta
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: dock
- version: v1beta
- spec:
- containers:
- - name: dock
- securityContext:
- privileged: true
- capabilities:
- add: ["SYS_ADMIN"]
- allowPrivilegeEscalation: true
- image: opensdsio/opensds-dock:latest
- imagePullPolicy: IfNotPresent
- command: ["bin/sh"]
- args: ["-c", "/usr/sbin/tgtd; /usr/bin/osdsdock -logtostderr"]
- ports:
- - containerPort: 50050
- volumeMounts:
- - name: opensds-conf-dir
- mountPath: /etc/opensds
- - name: ceph-conf-dir
- mountPath: /etc/ceph
- - name: tgt-conf-dir
- mountPath: /etc/tgt
- mountPropagation: "Bidirectional"
- - name: run-dir
- mountPath: /run
- mountPropagation: "Bidirectional"
- - name: dev-dir
- mountPath: /dev
- mountPropagation: "HostToContainer"
- - name: local-time-file
- mountPath: /etc/localtime
- readOnly: true
- - name: lib-modules-dir
- mountPath: /lib/modules
- readOnly: true
- volumes:
- - name: opensds-conf-dir
- hostPath:
- path: /etc/opensds
- type: Directory
- - name: ceph-conf-dir
- hostPath:
- path: /etc/ceph
- type: DirectoryOrCreate
- - name: tgt-conf-dir
- hostPath:
- path: /etc/tgt
- type: Directory
- - name: run-dir
- hostPath:
- path: /run
- type: Directory
- - name: dev-dir
- hostPath:
- path: /dev
- type: Directory
- - name: local-time-file
- hostPath:
- path: /etc/localtime
- type: File
- - name: lib-modules-dir
- hostPath:
- path: /lib/modules
- type: Directory
----
-##################################################################################################
-# Dashboard service
-##################################################################################################
-apiVersion: v1
-kind: Service
-metadata:
- name: dashboard
- namespace: opensds
- labels:
- app: dashboard
- service: dashboard
-spec:
- ports:
- - port: 8088
- nodePort: 31975
- name: http-dashboard
- selector:
- app: dashboard
- type: NodePort
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: dashboard-v1beta
- namespace: opensds
- labels:
- app: dashboard
- version: v1beta
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: dashboard
- version: v1beta
- spec:
- containers:
- - name: dashboard
- image: opensdsio/dashboard:latest
- env:
- - name: OPENSDS_AUTH_URL
- value: http://authchecker.opensds.svc.cluster.local/identity
- - name: OPENSDS_HOTPOT_URL
- value: http://apiserver.opensds.svc.cluster.local:50040
- - name: OPENSDS_GELATO_URL
- value: http://127.0.0.1:8089
- imagePullPolicy: IfNotPresent
- ports:
- - containerPort: 8088
----
-##################################################################################################
-# DB service
-##################################################################################################
-apiVersion: v1
-kind: Service
-metadata:
- name: db
- namespace: opensds
- labels:
- app: db
- service: db
-spec:
- ports:
- - port: 2379
- name: tcp-db1
- - port: 2380
- name: tcp-db2
- selector:
- app: db
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: db-v1
- namespace: opensds
- labels:
- app: db
- version: v1
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: db
- version: v1
- spec:
- containers:
- - name: db
- image: quay.io/coreos/etcd:latest
- imagePullPolicy: IfNotPresent
- command: ["/bin/sh"]
- args: ["-c", "/usr/local/bin/etcd \
- --name s1 \
- --listen-client-urls http://0.0.0.0:2379 \
- --advertise-client-urls http://0.0.0.0:2379 \
- --listen-peer-urls http://0.0.0.0:2380 \
- --initial-advertise-peer-urls http://0.0.0.0:2380 \
- --initial-cluster s1=http://0.0.0.0:2380"]
- ports:
- - containerPort: 2379
- - containerPort: 2380
- volumeMounts:
- - name: etcd-cert-dir
- mountPath: /etc/ssl/certs
- volumes:
- - name: etcd-cert-dir
- hostPath:
- path: /usr/share/ca-certificates/
- type: Directory
----
-##################################################################################################
-# Authchecker service
-##################################################################################################
-apiVersion: v1
-kind: Service
-metadata:
- name: authchecker
- namespace: opensds
- labels:
- app: authchecker
- service: authchecker
-spec:
- ports:
- - port: 80
- name: http-authchecker
- selector:
- app: authchecker
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: authchecker-v1
- namespace: opensds
- labels:
- app: authchecker
- version: v1
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: authchecker
- version: v1
- spec:
- containers:
- - name: authchecker
- securityContext:
- privileged: true
- capabilities:
- add: ["SYS_ADMIN"]
- allowPrivilegeEscalation: true
- image: opensdsio/opensds-authchecker:latest
- imagePullPolicy: IfNotPresent
- ports:
- - containerPort: 80
----
diff --git a/install/kubernetes/opensds-apiserver-v1.yaml b/install/kubernetes/opensds-apiserver-v1.yaml
deleted file mode 100644
index 2154635fb..000000000
--- a/install/kubernetes/opensds-apiserver-v1.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-##################################################################################################
-# Apiserver service v1
-##################################################################################################
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: apiserver-v1
- namespace: opensds
- labels:
- app: apiserver
- version: v1
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: apiserver
- version: v1
- spec:
- containers:
- - name: apiserver
- image: opensdsio/opensds-apiserver:latest
- imagePullPolicy: IfNotPresent
- command: ["bin/sh"]
- args: ["-c", "/usr/bin/osdsapiserver -logtostderr"]
- ports:
- - containerPort: 50040
- volumeMounts:
- - name: opensds-conf-dir
- mountPath: /etc/opensds
- volumes:
- - name: opensds-conf-dir
- hostPath:
- path: /etc/opensds
- type: Directory
----
diff --git a/install/kubernetes/opensds-apiserver-v1beta.yaml b/install/kubernetes/opensds-apiserver-v1beta.yaml
deleted file mode 100644
index f98d05414..000000000
--- a/install/kubernetes/opensds-apiserver-v1beta.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-##################################################################################################
-# Apiserver service v1beta
-##################################################################################################
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: apiserver-v1beta
- namespace: opensds
- labels:
- app: apiserver
- version: v1beta
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: apiserver
- version: v1beta
- spec:
- containers:
- - name: apiserver
- image: opensdsio/opensds-apiserver:latest
- imagePullPolicy: IfNotPresent
- command: ["bin/sh"]
- args: ["-c", "/usr/bin/osdsapiserver -logtostderr"]
- ports:
- - containerPort: 50040
- volumeMounts:
- - name: opensds-conf-dir
- mountPath: /etc/opensds
- volumes:
- - name: opensds-conf-dir
- hostPath:
- path: /etc/opensds
- type: Directory
----
diff --git a/install/kubernetes/opensds-controller-v1.yaml b/install/kubernetes/opensds-controller-v1.yaml
deleted file mode 100644
index fd91ba5d8..000000000
--- a/install/kubernetes/opensds-controller-v1.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-##################################################################################################
-# Controller service
-##################################################################################################
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: controller-v1
- namespace: opensds
- labels:
- app: controller
- version: v1
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: controller
- version: v1
- spec:
- containers:
- - name: controller
- image: opensdsio/opensds-controller:latest
- imagePullPolicy: IfNotPresent
- command: ["bin/sh"]
- args: ["-c", "/usr/bin/osdslet -logtostderr"]
- ports:
- - containerPort: 50049
- volumeMounts:
- - name: opensds-conf-dir
- mountPath: /etc/opensds
- volumes:
- - name: opensds-conf-dir
- hostPath:
- path: /etc/opensds
- type: Directory
----
diff --git a/install/kubernetes/opensds-external.yaml b/install/kubernetes/opensds-external.yaml
deleted file mode 100644
index c5ff9f7bc..000000000
--- a/install/kubernetes/opensds-external.yaml
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-##################################################################################################
-# DB service
-##################################################################################################
-apiVersion: v1
-kind: Service
-metadata:
- name: db
- namespace: opensds
- labels:
- app: db
- service: db
-spec:
- ports:
- - port: 2379
- name: tcp-db1
- - port: 2380
- name: tcp-db2
- selector:
- app: db
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: db-v1
- namespace: opensds
- labels:
- app: db
- version: v1
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: db
- version: v1
- spec:
- containers:
- - name: db
- image: quay.io/coreos/etcd:latest
- imagePullPolicy: IfNotPresent
- command: ["/bin/sh"]
- args: ["-c", "/usr/local/bin/etcd \
- --name s1 \
- --listen-client-urls http://0.0.0.0:2379 \
- --advertise-client-urls http://0.0.0.0:2379 \
- --listen-peer-urls http://0.0.0.0:2380 \
- --initial-advertise-peer-urls http://0.0.0.0:2380 \
- --initial-cluster s1=http://0.0.0.0:2380"]
- ports:
- - containerPort: 2379
- - containerPort: 2380
- volumeMounts:
- - name: etcd-cert-dir
- mountPath: /etc/ssl/certs
- volumes:
- - name: etcd-cert-dir
- hostPath:
- path: /usr/share/ca-certificates/
- type: Directory
----
-##################################################################################################
-# Authchecker service
-##################################################################################################
-apiVersion: v1
-kind: Service
-metadata:
- name: authchecker
- namespace: opensds
- labels:
- app: authchecker
- service: authchecker
-spec:
- ports:
- - port: 80
- name: http-authchecker
- selector:
- app: authchecker
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: authchecker-v1
- namespace: opensds
- labels:
- app: authchecker
- version: v1
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: authchecker
- version: v1
- spec:
- containers:
- - name: authchecker
- securityContext:
- privileged: true
- capabilities:
- add: ["SYS_ADMIN"]
- allowPrivilegeEscalation: true
- image: opensdsio/opensds-authchecker:latest
- imagePullPolicy: IfNotPresent
- ports:
- - containerPort: 80
----
diff --git a/install/kubernetes/opensds-internal.yaml b/install/kubernetes/opensds-internal.yaml
deleted file mode 100644
index 99736abae..000000000
--- a/install/kubernetes/opensds-internal.yaml
+++ /dev/null
@@ -1,263 +0,0 @@
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-##################################################################################################
-# Apiserver service
-##################################################################################################
-apiVersion: v1
-kind: Service
-metadata:
- name: apiserver
- namespace: opensds
- labels:
- app: apiserver
- service: apiserver
-spec:
- ports:
- - port: 50040
- name: http-apiserver
- selector:
- app: apiserver
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: apiserver-v1beta
- namespace: opensds
- labels:
- app: apiserver
- version: v1beta
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: apiserver
- version: v1beta
- spec:
- containers:
- - name: apiserver
- image: opensdsio/opensds-apiserver:latest
- imagePullPolicy: IfNotPresent
- command: ["bin/sh"]
- args: ["-c", "/usr/bin/osdsapiserver -logtostderr"]
- ports:
- - containerPort: 50040
- volumeMounts:
- - name: opensds-conf-dir
- mountPath: /etc/opensds
- volumes:
- - name: opensds-conf-dir
- hostPath:
- path: /etc/opensds
- type: Directory
----
-##################################################################################################
-# Controller service
-##################################################################################################
-apiVersion: v1
-kind: Service
-metadata:
- name: controller
- namespace: opensds
- labels:
- app: controller
- service: controller
-spec:
- ports:
- - port: 50049
- name: tcp-controller
- selector:
- app: controller
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: controller-v1beta
- namespace: opensds
- labels:
- app: controller
- version: v1beta
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: controller
- version: v1beta
- spec:
- containers:
- - name: controller
- image: opensdsio/opensds-controller:latest
- imagePullPolicy: IfNotPresent
- command: ["bin/sh"]
- args: ["-c", "/usr/bin/osdslet -logtostderr"]
- ports:
- - containerPort: 50049
- volumeMounts:
- - name: opensds-conf-dir
- mountPath: /etc/opensds
- volumes:
- - name: opensds-conf-dir
- hostPath:
- path: /etc/opensds
- type: Directory
----
-##################################################################################################
-# Dock service
-##################################################################################################
-apiVersion: v1
-kind: Service
-metadata:
- name: dock
- namespace: opensds
- labels:
- app: dock
- service: dock
-spec:
- ports:
- - port: 50050
- name: tcp-dock
- selector:
- app: dock
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: dock-v1beta
- namespace: opensds
- labels:
- app: dock
- version: v1beta
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: dock
- version: v1beta
- spec:
- containers:
- - name: dock
- securityContext:
- privileged: true
- capabilities:
- add: ["SYS_ADMIN"]
- allowPrivilegeEscalation: true
- image: opensdsio/opensds-dock:latest
- imagePullPolicy: IfNotPresent
- command: ["bin/sh"]
- args: ["-c", "/usr/sbin/tgtd; /usr/bin/osdsdock -logtostderr"]
- ports:
- - containerPort: 50050
- volumeMounts:
- - name: opensds-conf-dir
- mountPath: /etc/opensds
- - name: ceph-conf-dir
- mountPath: /etc/ceph
- - name: tgt-conf-dir
- mountPath: /etc/tgt
- mountPropagation: "Bidirectional"
- - name: run-dir
- mountPath: /run
- mountPropagation: "Bidirectional"
- - name: dev-dir
- mountPath: /dev
- mountPropagation: "HostToContainer"
- - name: local-time-file
- mountPath: /etc/localtime
- readOnly: true
- - name: lib-modules-dir
- mountPath: /lib/modules
- readOnly: true
- volumes:
- - name: opensds-conf-dir
- hostPath:
- path: /etc/opensds
- type: Directory
- - name: ceph-conf-dir
- hostPath:
- path: /etc/ceph
- type: DirectoryOrCreate
- - name: tgt-conf-dir
- hostPath:
- path: /etc/tgt
- type: Directory
- - name: run-dir
- hostPath:
- path: /run
- type: Directory
- - name: dev-dir
- hostPath:
- path: /dev
- type: Directory
- - name: local-time-file
- hostPath:
- path: /etc/localtime
- type: File
- - name: lib-modules-dir
- hostPath:
- path: /lib/modules
- type: Directory
----
-##################################################################################################
-# Dashboard service
-##################################################################################################
-apiVersion: v1
-kind: Service
-metadata:
- name: dashboard
- namespace: opensds
- labels:
- app: dashboard
- service: dashboard
-spec:
- ports:
- - port: 8088
- nodePort: 31975
- name: http-dashboard
- selector:
- app: dashboard
- type: NodePort
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: dashboard-v1beta
- namespace: opensds
- labels:
- app: dashboard
- version: v1beta
-spec:
- replicas: 1
- template:
- metadata:
- labels:
- app: dashboard
- version: v1beta
- spec:
- containers:
- - name: dashboard
- image: opensdsio/dashboard:latest
- env:
- - name: OPENSDS_AUTH_URL
- value: http://authchecker.opensds.svc.cluster.local/identity
- - name: OPENSDS_HOTPOT_URL
- value: http://apiserver.opensds.svc.cluster.local:50040
- - name: OPENSDS_GELATO_URL
- value: http://127.0.0.1:8089
- imagePullPolicy: IfNotPresent
- ports:
- - containerPort: 8088
----
diff --git a/install/opensds-authchecker/Dockerfile b/install/opensds-authchecker/Dockerfile
deleted file mode 100644
index 84a7f5065..000000000
--- a/install/opensds-authchecker/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-# Docker build usage:
-# docker build . -t opensdsio/opensds-authchecker:base
-# docker run -d --privileged=true --name opensds-authchecker-base opensdsio/opensds-authchecker:base "/sbin/init"
-# docker exec -it opensds-authchecker-base /keystone.sh
-# docker commit opensds-authchecker-base opensdsio/opensds-authchecker:latest
-# docker rm -f opensds-authchecker-base
-# Docker run usage:
-# docker run -d --privileged=true --name opensds-authchecker opensdsio/opensds-authchecker:latest
-
-FROM ubuntu:16.04
-MAINTAINER Leon Wang
-
-COPY keystone.sh /keystone.sh
-COPY entrypoint.sh /entrypoint.sh
-
-# Install some packages before running command.
-RUN apt-get update && apt-get install -y \
- sudo nano git telnet net-tools iptables gnutls-bin ca-certificates && \
- mkdir -p /opt/stack/
-
-RUN ["chmod", "+x", "/keystone.sh", "/entrypoint.sh"]
diff --git a/install/opensds-authchecker/entrypoint.sh b/install/opensds-authchecker/entrypoint.sh
deleted file mode 100644
index 6428aa6bf..000000000
--- a/install/opensds-authchecker/entrypoint.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-get_default_host_ip() {
- local host_ip=$1
- local af=$2
- # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
- if [ -z "$host_ip" ]; then
- host_ip=""
- # Find the interface used for the default route
- host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)}
- local host_ips
- host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}')
- local ip
- for ip in $host_ips; do
- host_ip=$ip
- break;
- done
- fi
- echo $host_ip
-}
-
-HOST_IP=$(get_default_host_ip "$HOST_IP" "inet")
-
-sed -i "s,^admin_endpoint.*$,admin_endpoint = http://$HOST_IP/identity,g" /etc/keystone/keystone.conf
-sed -i "s,^public_endpoint.*$,public_endpoint = http://$HOST_IP/identity,g" /etc/keystone/keystone.conf
-
-systemctl restart devstack@keystone.service
diff --git a/install/opensds-authchecker/keystone.sh b/install/opensds-authchecker/keystone.sh
deleted file mode 100644
index dd435011a..000000000
--- a/install/opensds-authchecker/keystone.sh
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2019 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Default host ip.
-HOST_IP=0.0.0.0
-# OpenSDS version configuration.
-OPENSDS_VERSION=${OPENSDS_VERSION:-v1beta}
-# OpenSDS service name in keystone.
-OPENSDS_SERVER_NAME=${OPENSDS_SERVER_NAME:-opensds}
-
-# devstack keystone configuration
-STACK_GIT_BASE=${STACK_GIT_BASE:-https://git.openstack.org}
-STACK_USER_NAME=${STACK_USER_NAME:-stack}
-STACK_PASSWORD=${STACK_PASSWORD:-opensds@123}
-STACK_HOME=${STACK_HOME:-/opt/stack}
-STACK_BRANCH=${STACK_BRANCH:-stable/queens}
-DEV_STACK_DIR=$STACK_HOME/devstack
-
-# Multi-Cloud service name in keystone
-MULTICLOUD_SERVER_NAME=${MULTICLOUD_SERVER_NAME:-multicloud}
-# Multi-cloud
-MULTICLOUD_VERSION=${MULTICLOUD_VERSION:-v1}
-
-osds::keystone::create_user(){
- if id ${STACK_USER_NAME} &> /dev/null; then
- return
- fi
- sudo useradd -s /bin/bash -d ${STACK_HOME} -m ${STACK_USER_NAME}
- echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
-}
-
-osds::keystone::devstack_local_conf(){
-DEV_STACK_LOCAL_CONF=${DEV_STACK_DIR}/local.conf
-cat > $DEV_STACK_LOCAL_CONF << DEV_STACK_LOCAL_CONF_DOCK
-[[local|localrc]]
-# use TryStack git mirror
-GIT_BASE=$STACK_GIT_BASE
-disable_service mysql
-enable_service postgresql
-# If the ``*_PASSWORD`` variables are not set here you will be prompted to enter
-# values for them by ``stack.sh``and they will be added to ``local.conf``.
-ADMIN_PASSWORD=$STACK_PASSWORD
-DATABASE_PASSWORD=$STACK_PASSWORD
-RABBIT_PASSWORD=$STACK_PASSWORD
-SERVICE_PASSWORD=$STACK_PASSWORD
-# Neither is set by default.
-HOST_IP=$HOST_IP
-# path of the destination log file. A timestamp will be appended to the given name.
-LOGFILE=\$DEST/logs/stack.sh.log
-# Old log files are automatically removed after 7 days to keep things neat. Change
-# the number of days by setting ``LOGDAYS``.
-LOGDAYS=2
-ENABLED_SERVICES=postgresql,key
-# Using stable/queens branches
-# ---------------------------------
-KEYSTONE_BRANCH=$STACK_BRANCH
-KEYSTONECLIENT_BRANCH=$STACK_BRANCH
-DEV_STACK_LOCAL_CONF_DOCK
-chown stack:stack $DEV_STACK_LOCAL_CONF
-}
-
-osds::keystone::create_user_and_endpoint(){
- . $DEV_STACK_DIR/openrc admin admin
-
- # for_hotpot
- openstack user create --domain default --password $STACK_PASSWORD $OPENSDS_SERVER_NAME
- openstack role add --project service --user opensds admin
- openstack group create service
- openstack group add user service opensds
- openstack role add service --project service --group service
- openstack group add user admins admin
- openstack service create --name opensds$OPENSDS_VERSION --description "OpenSDS Block Storage" opensds$OPENSDS_VERSION
- openstack endpoint create --region RegionOne opensds$OPENSDS_VERSION public http://$HOST_IP:50040/$OPENSDS_VERSION/%\(tenant_id\)s
- openstack endpoint create --region RegionOne opensds$OPENSDS_VERSION internal http://$HOST_IP:50040/$OPENSDS_VERSION/%\(tenant_id\)s
- openstack endpoint create --region RegionOne opensds$OPENSDS_VERSION admin http://$HOST_IP:50040/$OPENSDS_VERSION/%\(tenant_id\)s
-
- # for_gelato
- openstack user create --domain default --password "$STACK_PASSWORD" "$MULTICLOUD_SERVER_NAME"
- openstack role add --project service --user "$MULTICLOUD_SERVER_NAME" admin
- openstack group add user service "$MULTICLOUD_SERVER_NAME"
- openstack service create --name "multicloud$MULTICLOUD_VERSION" --description "Multi-cloud Block Storage" "multicloud$MULTICLOUD_VERSION"
- openstack endpoint create --region RegionOne "multicloud$MULTICLOUD_VERSION" public "http://$HOST_IP:8089/$MULTICLOUD_VERSION/%(tenant_id)s"
- openstack endpoint create --region RegionOne "multicloud$MULTICLOUD_VERSION" internal "http://$HOST_IP:8089/$MULTICLOUD_VERSION/%(tenant_id)s"
- openstack endpoint create --region RegionOne "multicloud$MULTICLOUD_VERSION" admin "http://$HOST_IP:8089/$MULTICLOUD_VERSION/%(tenant_id)s"
-}
-
-osds::keystone::delete_redundancy_data() {
- . $DEV_STACK_DIR/openrc admin admin
- openstack project delete demo
- openstack project delete alt_demo
- openstack project delete invisible_to_admin
- openstack user delete demo
- openstack user delete alt_demo
-}
-
-osds::keystone::download_code(){
- if [ ! -d ${DEV_STACK_DIR} ];then
- git clone ${STACK_GIT_BASE}/openstack-dev/devstack -b ${STACK_BRANCH} ${DEV_STACK_DIR}
- chown stack:stack -R ${DEV_STACK_DIR}
- fi
-
-}
-
-osds::keystone::install(){
- KEYSTONE_IP=$HOST_IP
- osds::keystone::create_user
- osds::keystone::download_code
-
- osds::keystone::devstack_local_conf
- cd ${DEV_STACK_DIR}
- su $STACK_USER_NAME -c ${DEV_STACK_DIR}/stack.sh
- osds::keystone::create_user_and_endpoint
- osds::keystone::delete_redundancy_data
-}
-
-osds::keystone::install
-# set entrypoint.sh as init command
-sed -i '14i\/entrypoint\.sh' /etc/rc.local
diff --git a/install/tools/pwdEncrypter/README.md b/install/tools/pwdEncrypter/README.md
deleted file mode 100644
index d69ec6f27..000000000
--- a/install/tools/pwdEncrypter/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-This is a password encryption tool provided by OpenSDS. User can use this tool to get cipher text and shell scripts can call it for automatic encryption during the deployment process.
-
-Steps for usage:
-
-1: Use go build command to compile go source file.
-
-2: Modify the pwdEncrypter.yaml to choose encryption tool.
-
-3: Run ./pwdEncrypter password to get cipher text.
\ No newline at end of file
diff --git a/install/tools/pwdEncrypter/passwordencrypter.go b/install/tools/pwdEncrypter/passwordencrypter.go
deleted file mode 100644
index fadee97e7..000000000
--- a/install/tools/pwdEncrypter/passwordencrypter.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "fmt"
- "os"
-
- "io/ioutil"
-
- "github.com/opensds/opensds/pkg/utils/pwd"
- "github.com/spf13/cobra"
- "gopkg.in/yaml.v2"
-)
-
-const (
- confFile = "./pwdEncrypter.yaml"
-)
-
-type tool struct {
- PwdEncrypter string `yaml:"PwdEncrypter,omitempty"`
-}
-
-var encrypterCommand = &cobra.Command{
- Use: "pwdEncrypter ",
- Short: "password encryption tool",
- Run: encrypter,
-}
-
-func encrypter(cmd *cobra.Command, args []string) {
- if len(args) == 0 {
- cmd.Usage()
- os.Exit(0)
- }
-
- if len(args) != 1 {
- fmt.Println("The number of args is not correct!")
- cmd.Usage()
- os.Exit(1)
- }
-
- // Initialize configuration file
- pwdEncrypter, err := loadConf(confFile)
- if err != nil {
- fmt.Println("Encrypt password error:", err)
- os.Exit(1)
- }
-
- // Encrypt the password
- encrypterTool := pwd.NewPwdEncrypter(pwdEncrypter.PwdEncrypter)
- plaintext, err := encrypterTool.Encrypter(args[0])
- if err != nil {
- fmt.Println("Encrypt password error:", err)
- os.Exit(1)
- }
-
- fmt.Println(plaintext)
-}
-
-func loadConf(f string) (*tool, error) {
- conf := &tool{}
- confYaml, err := ioutil.ReadFile(f)
- if err != nil {
- return nil, fmt.Errorf("Read config yaml file (%s) failed, reason:(%v)", f, err)
- }
- if err = yaml.Unmarshal(confYaml, conf); err != nil {
- return nil, fmt.Errorf("Parse error: %v", err)
- }
- return conf, nil
-}
-
-func main() {
- if err := encrypterCommand.Execute(); err != nil {
- fmt.Println("Encrypt password error:", err)
- os.Exit(1)
- }
-}
diff --git a/install/tools/pwdEncrypter/pwdEncrypter.yaml b/install/tools/pwdEncrypter/pwdEncrypter.yaml
deleted file mode 100644
index fc1176c24..000000000
--- a/install/tools/pwdEncrypter/pwdEncrypter.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-# Encryption tool. Default value is aes.
-PwdEncrypter: "aes"
\ No newline at end of file
diff --git a/openapi-spec/swagger.yaml b/openapi-spec/swagger.yaml
index 23aa5bc06..b73a578bf 100755
--- a/openapi-spec/swagger.yaml
+++ b/openapi-spec/swagger.yaml
@@ -2119,7 +2119,7 @@ security:
- basicAuth: []
externalDocs:
description: The offical OpenSDS Controller API specification
- url: 'https://github.com/opensds/opensds'
+ url: 'https://github.com/sodafoundation/api'
servers:
- url: 'https://localhost:50040/'
- url: 'http://localhost:50040/'
diff --git a/osdsctl/README.md b/osdsctl/README.md
deleted file mode 100644
index 9fa20ddd7..000000000
--- a/osdsctl/README.md
+++ /dev/null
@@ -1 +0,0 @@
-This is a CLI tool for connecting to OpenSDS client.
diff --git a/osdsctl/cli/cli.go b/osdsctl/cli/cli.go
deleted file mode 100644
index b28ea68f6..000000000
--- a/osdsctl/cli/cli.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-/*
-This module implements a entry into the OpenSDS CLI service.
-
-*/
-
-package cli
-
-import (
- "log"
- "os"
-
- c "github.com/opensds/opensds/client"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/opensds/opensds/pkg/utils/constants"
- "github.com/spf13/cobra"
-)
-
-var (
- client *c.Client
- rootCommand = &cobra.Command{
- Use: "osdsctl",
- Short: "Administer the opensds storage cluster",
- Long: `Admin utility for the opensds unified storage cluster.`,
- Run: func(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
- },
- }
- Debug bool
-)
-
-func init() {
- rootCommand.AddCommand(versionCommand)
- rootCommand.AddCommand(volumeCommand)
- rootCommand.AddCommand(dockCommand)
- rootCommand.AddCommand(poolCommand)
- rootCommand.AddCommand(profileCommand)
- rootCommand.AddCommand(fileShareCommand)
- rootCommand.AddCommand(hostCommand)
- flags := rootCommand.PersistentFlags()
- flags.BoolVar(&Debug, "debug", false, "shows debugging output.")
-}
-
-type DummyWriter struct{}
-
-// do nothing
-func (writer DummyWriter) Write(data []byte) (n int, err error) {
- return len(data), nil
-}
-
-type DebugWriter struct{}
-
-// do nothing
-func (writer DebugWriter) Write(data []byte) (n int, err error) {
- Debugf("%s", string(data))
- return len(data), nil
-}
-
-// Run method indicates how to start a cli tool through cobra.
-func Run() error {
- if !utils.Contained("--debug", os.Args) {
- log.SetOutput(DummyWriter{})
- } else {
- log.SetOutput(DebugWriter{})
- }
-
- ep, ok := os.LookupEnv(c.OpensdsEndpoint)
- if !ok {
- ep = constants.DefaultOpensdsEndpoint
- Warnf("OPENSDS_ENDPOINT is not specified, use default(%s)\n", ep)
- }
-
- cfg := &c.Config{Endpoint: ep}
-
- authStrategy, ok := os.LookupEnv(c.OpensdsAuthStrategy)
- if !ok {
- authStrategy = c.Noauth
- Warnf("Not found Env OPENSDS_AUTH_STRATEGY, use default(noauth)\n")
- }
-
- var authOptions c.AuthOptions
- var err error
-
- switch authStrategy {
- case c.Keystone:
- authOptions, err = c.LoadKeystoneAuthOptionsFromEnv()
- if err != nil {
- return err
- }
- case c.Noauth:
- authOptions = c.LoadNoAuthOptionsFromEnv()
- default:
- authOptions = c.NewNoauthOptions(constants.DefaultTenantId)
- }
-
- cfg.AuthOptions = authOptions
-
- client, err = c.NewClient(cfg)
- if client == nil || err != nil {
- return err
- }
-
- return rootCommand.Execute()
-}
diff --git a/osdsctl/cli/common.go b/osdsctl/cli/common.go
deleted file mode 100644
index f58cadf17..000000000
--- a/osdsctl/cli/common.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "fmt"
- "net/http"
- "os"
-
- c "github.com/opensds/opensds/client"
- "github.com/spf13/cobra"
-)
-
-const (
- errorPrefix = "ERROR:"
- debugPrefix = "DEBUG:"
- warnPrefix = "WARNING:"
-)
-
-func Printf(format string, a ...interface{}) (n int, err error) {
- return fmt.Fprintf(os.Stdout, format, a...)
-}
-
-func Debugf(format string, a ...interface{}) (n int, err error) {
- if Debug {
- return fmt.Fprintf(os.Stdout, debugPrefix+" "+format, a...)
- }
- return 0, nil
-}
-
-func Warnf(format string, a ...interface{}) (n int, err error) {
- return fmt.Fprintf(os.Stdout, warnPrefix+" "+format, a...)
-}
-
-func Errorf(format string, a ...interface{}) (n int, err error) {
- return fmt.Fprintf(os.Stderr, errorPrefix+" "+format, a...)
-}
-
-func Fatalf(format string, a ...interface{}) {
- fmt.Fprintf(os.Stderr, errorPrefix+" "+format, a...)
- os.Exit(-1)
-}
-
-func Println(a ...interface{}) (n int, err error) {
- return fmt.Fprintln(os.Stdout, a...)
-}
-
-func Debugln(a ...interface{}) (n int, err error) {
- if Debug {
- a = append([]interface{}{debugPrefix}, a...)
- return fmt.Fprintln(os.Stdout, a...)
- }
- return 0, nil
-}
-
-func Warnln(a ...interface{}) (n int, err error) {
- a = append([]interface{}{warnPrefix}, a...)
- return fmt.Fprintln(os.Stdout, a...)
-}
-
-func Errorln(a ...interface{}) (n int, err error) {
- a = append([]interface{}{errorPrefix}, a...)
- return fmt.Fprintln(os.Stderr, a...)
-}
-
-func Fatalln(a ...interface{}) {
- a = append([]interface{}{errorPrefix}, a...)
- fmt.Fprintln(os.Stderr, a...)
- os.Exit(-1)
-}
-
-// Strip some redundant message from client http error.
-func HttpErrStrip(err error) error {
- if httpErr, ok := err.(*c.HttpError); ok {
- httpErr.Decode()
- if len(httpErr.Msg) == 0 {
- return fmt.Errorf("%d %s", httpErr.Code, http.StatusText(httpErr.Code))
- }
- return fmt.Errorf(httpErr.Msg)
- }
- return err
-}
-
-func ArgsNumCheck(cmd *cobra.Command, args []string, invalidNum int) {
- if len(args) != invalidNum {
- Errorln("The number of args is not correct!")
- cmd.Usage()
- os.Exit(1)
- }
-}
diff --git a/osdsctl/cli/dock.go b/osdsctl/cli/dock.go
deleted file mode 100644
index 094e06819..000000000
--- a/osdsctl/cli/dock.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS service.
-
-*/
-
-package cli
-
-import (
- "os"
-
- "github.com/spf13/cobra"
-)
-
-var dockCommand = &cobra.Command{
- Use: "dock",
- Short: "manage OpenSDS dock resources",
- Run: dockAction,
-}
-
-var dockShowCommand = &cobra.Command{
- Use: "show ",
- Short: "show information of specified dock",
- Run: dockShowAction,
-}
-
-var dockListCommand = &cobra.Command{
- Use: "list",
- Short: "get all dock resources",
- Run: dockListAction,
-}
-
-var (
- dockLimit string
- dockOffset string
- dockSortDir string
- dockSortKey string
- dockId string
- dockName string
- dockDescription string
- dockStatus string
- dockStorageType string
- dockEndpoint string
- dockDriverName string
-)
-
-func init() {
- dockListCommand.Flags().StringVarP(&dockLimit, "limit", "", "50", "the number of ertries displayed per page")
- dockListCommand.Flags().StringVarP(&dockOffset, "offset", "", "0", "all requested data offsets")
- dockListCommand.Flags().StringVarP(&dockSortDir, "sortDir", "", "desc", "the sort direction of all requested data. supports asc or desc(default)")
- dockListCommand.Flags().StringVarP(&dockSortKey, "sortKey", "", "id", "the sort key of all requested data. supports id(default), name, status, endpoint, drivername, description")
- dockListCommand.Flags().StringVarP(&dockId, "id", "", "", "list docks by id")
- dockListCommand.Flags().StringVarP(&dockName, "name", "", "", "list docks by name")
- dockListCommand.Flags().StringVarP(&dockDescription, "description", "", "", "list docks by description")
- dockListCommand.Flags().StringVarP(&dockStatus, "status", "", "", "list docks by status")
- dockListCommand.Flags().StringVarP(&dockStorageType, "storageType", "", "", "list docks by storage type")
- dockListCommand.Flags().StringVarP(&dockEndpoint, "endpoint", "", "", "list docks by endpoint")
- dockListCommand.Flags().StringVarP(&dockDriverName, "driverName", "", "", "list docks by driver name")
-
- dockCommand.AddCommand(dockShowCommand)
- dockCommand.AddCommand(dockListCommand)
-
-}
-
-func dockAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-var dockFormatters = FormatterList{"Metadata": JsonFormatter}
-
-func dockShowAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- resp, err := client.GetDock(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "CreatedAt", "UpdatedAt", "Name", "Description", "Endpoint", "DriverName",
- "Metadata"}
- PrintDict(resp, keys, dockFormatters)
-}
-
-func dockListAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
- var opts = map[string]string{"limit": dockLimit, "offset": dockOffset, "sortDir": dockSortDir,
- "sortKey": dockSortKey, "Id": dockId,
- "Name": dockName, "Description": dockDescription, "DriverName": dockDriverName,
- "Endpoint": dockEndpoint, "Status": dockStatus, "StorageType": dockStorageType}
-
- resp, err := client.ListDocks(opts)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "Name", "Description", "Endpoint", "DriverName"}
- PrintList(resp, keys, dockFormatters)
-}
diff --git a/osdsctl/cli/dock_test.go b/osdsctl/cli/dock_test.go
deleted file mode 100644
index bf9e3437e..000000000
--- a/osdsctl/cli/dock_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "os"
- "os/exec"
- "testing"
-
- c "github.com/opensds/opensds/client"
-)
-
-func init() {
- client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
-}
-
-func TestDockAction(t *testing.T) {
- beCrasher := os.Getenv("BE_CRASHER")
-
- if beCrasher == "1" {
- var args []string
- dockAction(dockCommand, args)
-
- return
- }
-
- cmd := exec.Command(os.Args[0], "-test.run=TestDockAction")
- cmd.Env = append(os.Environ(), "BE_CRASHER=1")
- err := cmd.Run()
- e, ok := err.(*exec.ExitError)
-
- if ok && ("exit status 1" == e.Error()) {
- return
- }
-
- t.Fatalf("process ran with %s, want exit status 1", e.Error())
-}
-
-func TestDockShowAction(t *testing.T) {
- var args []string
- args = append(args, "b7602e18-771e-11e7-8f38-dbd6d291f4e0")
- dockShowAction(dockShowCommand, args)
-}
-
-func TestDockListAction(t *testing.T) {
- var args []string
- dockListAction(dockListCommand, args)
-}
diff --git a/osdsctl/cli/fileshare.go b/osdsctl/cli/fileshare.go
deleted file mode 100644
index a5aefd053..000000000
--- a/osdsctl/cli/fileshare.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-/*
-This module implements a entry into the OpenSDS service.
-
-*/
-
-package cli
-
-import (
- "log"
- "os"
- "strconv"
-
- "github.com/opensds/opensds/pkg/model"
- "github.com/spf13/cobra"
-)
-
-var fileShareCommand = &cobra.Command{
- Use: "fileshare",
- Short: "manage fileshares in the cluster",
- Run: fileShareAction,
-}
-
-var fileShareCreateCommand = &cobra.Command{
- Use: "create ",
- Short: "create a fileshare in the cluster",
- Run: fileShareCreateAction,
-}
-
-var fileShareDeleteCommand = &cobra.Command{
- Use: "delete ",
- Short: "delete a fileshare in the cluster",
- Run: fileShareDeleteAction,
-}
-
-var fileShareShowCommand = &cobra.Command{
- Use: "show ",
- Short: "show a fileshare in the cluster",
- Run: fileShareShowAction,
-}
-
-var fileShareListCommand = &cobra.Command{
- Use: "list",
- Short: "list all fileshares in the cluster",
- Run: fileShareListAction,
-}
-
-var fileShareUpdateCommand = &cobra.Command{
- Use: "update ",
- Short: "update a fileshare in the cluster",
- Run: fileShareUpdateAction,
-}
-
-var (
- shareAZ string
- shareDescription string
- shareID string
- shareName string
- sharePoolID string
- shareProfileID string
- shareStatus string
- shareTenantID string
- shareUserID string
- shareCreatedAt string
- shareUpdatedAt string
-
- shareLimit string
- shareOffset string
- shareSortDir string
- shareSortKey string
-
- shareFormatters = FormatterList{"Metadata": JsonFormatter}
-)
-
-func init() {
- fileShareCommand.AddCommand(fileShareCreateCommand)
- fileShareCommand.AddCommand(fileShareDeleteCommand)
- fileShareCommand.AddCommand(fileShareShowCommand)
- fileShareCommand.AddCommand(fileShareListCommand)
- fileShareCommand.AddCommand(fileShareUpdateCommand)
- fileShareCommand.AddCommand(fileShareSnapshotCommand)
- fileShareCommand.AddCommand(fileShareAclCommand)
-
- fileShareCreateCommand.Flags().StringVarP(&shareName, "name", "n", "", "the name of the fileshare")
- fileShareCreateCommand.Flags().StringVarP(&shareDescription, "description", "d", "", "the description of the fileshare")
- fileShareCreateCommand.Flags().StringVarP(&shareAZ, "availabilityZone", "a", "", "the locality that fileshare belongs to")
- fileShareCreateCommand.Flags().StringVarP(&shareProfileID, "profileId", "p", "", "the uuid of the profile which the fileshare belongs to")
-
- fileShareListCommand.Flags().StringVarP(&shareLimit, "limit", "", "50", "the number of ertries displayed per page")
- fileShareListCommand.Flags().StringVarP(&shareOffset, "offset", "", "0", "all requested data offsets")
- fileShareListCommand.Flags().StringVarP(&shareSortDir, "sortDir", "", "desc", "the sort direction of all requested data. supports asc or desc(default)")
- fileShareListCommand.Flags().StringVarP(&shareSortKey, "sortKey", "", "id",
- "the sort key of all requested data. supports id(default), createdAt, updatedAt, name, status, availabilityZone, profileId, tenantId, userId, size, poolId, description")
- fileShareListCommand.Flags().StringVarP(&shareID, "id", "", "", "list share by id")
- fileShareListCommand.Flags().StringVarP(&shareName, "name", "", "", "list share by name")
- fileShareListCommand.Flags().StringVarP(&shareDescription, "description", "", "", "list share by description")
- fileShareListCommand.Flags().StringVarP(&shareTenantID, "tenantId", "", "", "list share by tenantId")
- fileShareListCommand.Flags().StringVarP(&shareUserID, "userId", "", "", "list share by userId")
- fileShareListCommand.Flags().StringVarP(&shareStatus, "status", "", "", "list share by status")
- fileShareListCommand.Flags().StringVarP(&sharePoolID, "poolId", "", "", "list share by poolId")
- fileShareListCommand.Flags().StringVarP(&shareAZ, "availabilityZone", "", "", "list share by availabilityZone")
- fileShareListCommand.Flags().StringVarP(&shareProfileID, "profileId", "", "", "list share by profileId")
- fileShareListCommand.Flags().StringVarP(&shareSize, "size", "", "", "list share by size")
- fileShareListCommand.Flags().StringVarP(&shareCreatedAt, "createdAt", "", "", "list share by createdAt")
- fileShareListCommand.Flags().StringVarP(&shareUpdatedAt, "updatedAt", "", "", "list share by updatedAt")
-
- fileShareUpdateCommand.Flags().StringVarP(&shareName, "name", "n", "", "the name of the fileshare")
- fileShareUpdateCommand.Flags().StringVarP(&shareDescription, "description", "d", "", "the description of the fileshare")
-}
-
-func fileShareAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-func fileShareCreateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- size, err := strconv.Atoi(args[0])
- if err != nil {
- log.Fatalf("error parsing size %s: %+v", args[0], err)
- }
-
- share := &model.FileShareSpec{
- Description: shareDescription,
- Name: shareName,
- Size: int64(size),
- AvailabilityZone: shareAZ,
- ProfileId: shareProfileID,
- }
-
- resp, err := client.CreateFileShare(share)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-
- keys := KeyList{"Id", "CreatedAt", "Name", "Description", "Size",
- "AvailabilityZone", "Status", "PoolId", "ProfileId", "Protocols",
- "TenantId", "UserId", "SnapshotId", "ExportLocations", "Metadata"}
- PrintDict(resp, keys, shareFormatters)
-}
-
-func fileShareDeleteAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- err := client.DeleteFileShare(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-}
-
-func fileShareShowAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- resp, err := client.GetFileShare(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-
- keys := KeyList{"Id", "CreatedAt", "UpdatedAt", "Name", "Description", "Size",
- "AvailabilityZone", "Status", "PoolId", "ProfileId", "Protocols",
- "TenantId", "UserId", "SnapshotId", "ExportLocations", "Metadata"}
- PrintDict(resp, keys, shareFormatters)
-}
-
-func fileShareListAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
-
- var opts = map[string]string{"limit": shareLimit, "offset": shareOffset, "sortDir": shareSortDir,
- "sortKey": shareSortKey, "Id": shareID, "Name": shareName, "Description": shareDescription,
- "TenantId": shareTenantID, "UserId": shareUserID, "AvailabilityZone": shareAZ, "Status": shareStatus,
- "PoolId": sharePoolID, "ProfileId": shareProfileID, "CreatedAt": shareCreatedAt,
- "UpdatedAt": shareUpdatedAt, "Size": shareSize}
-
- resp, err := client.ListFileShares(opts)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-
- keys := KeyList{"Id", "Name", "Description", "Size", "Status", "ProfileId", "Protocols", "ExportLocations"}
- PrintList(resp, keys, shareFormatters)
-}
-
-func fileShareUpdateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- share := &model.FileShareSpec{
- Name: shareName,
- Description: shareDescription,
- }
-
- resp, err := client.UpdateFileShare(args[0], share)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-
- keys := KeyList{"Id", "UpdatedAt", "Name", "Description", "Size",
- "AvailabilityZone", "Status", "PoolId", "ProfileId", "Protocols",
- "TenantId", "UserId", "SnapshotId", "ExportLocations", "Metadata"}
- PrintDict(resp, keys, shareFormatters)
-}
diff --git a/osdsctl/cli/fileshare_test.go b/osdsctl/cli/fileshare_test.go
deleted file mode 100644
index eb7e3e0df..000000000
--- a/osdsctl/cli/fileshare_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "os"
- "os/exec"
- "testing"
-
- c "github.com/opensds/opensds/client"
-)
-
-func init() {
- client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
-}
-
-func TestFileShareAction(t *testing.T) {
- beCrasher := os.Getenv("BE_CRASHER")
-
- if beCrasher == "1" {
- var args []string
- fileShareAction(fileShareCommand, args)
-
- return
- }
-
- cmd := exec.Command(os.Args[0], "-test.run=TestFileShareAction")
- cmd.Env = append(os.Environ(), "BE_CRASHER=1")
- err := cmd.Run()
- e, ok := err.(*exec.ExitError)
-
- if ok && ("exit status 1" == e.Error()) {
- return
- }
-
- t.Fatalf("process ran with %s, want exit status 1", e.Error())
-}
-
-func TestFileShareCreateAction(t *testing.T) {
- var args []string
- args = append(args, "1")
- fileShareCreateAction(fileShareCreateCommand, args)
-}
-
-func TestFileShareShowAction(t *testing.T) {
- var args []string
- args = append(args, "bd5b12a8-a101-11e7-941e-d77981b584d8")
- fileShareShowAction(fileShareShowCommand, args)
-}
-
-func TestFileShareListAction(t *testing.T) {
- var args []string
- fileShareListAction(fileShareListCommand, args)
-}
-
-func TestFileShareDeleteAction(t *testing.T) {
- var args []string
- args = append(args, "bd5b12a8-a101-11e7-941e-d77981b584d8")
- fileShareDeleteAction(fileShareDeleteCommand, args)
-}
-
-func TestFileShareUpdateAction(t *testing.T) {
- var args []string
- args = append(args, "bd5b12a8-a101-11e7-941e-d77981b584d8")
- fileShareUpdateAction(fileShareDeleteCommand, args)
-}
diff --git a/osdsctl/cli/fileshareacl.go b/osdsctl/cli/fileshareacl.go
deleted file mode 100644
index 0c9f70ca2..000000000
--- a/osdsctl/cli/fileshareacl.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS service.
-
-*/
-
-package cli
-
-import (
- "os"
-
- "github.com/opensds/opensds/pkg/model"
- "github.com/spf13/cobra"
-)
-
-var fileShareAclCommand = &cobra.Command{
- Use: "acl",
- Short: "manage fileshare acls in the cluster",
- Run: fileShareAclAction,
-}
-
-var fileShareAclCreateCommand = &cobra.Command{
- Use: "create ",
- Short: "create a acl of specified fileshare in the cluster",
- Example: "osdsctl fileshare acl create -a 10.0.0.10 -c \"Write\" -t ip 87be9ce5-6ecc-4ac3-8d6c-f5a58c9110e4",
- Run: fileShareAclCreateAction,
-}
-
-var fileShareAclDeleteCommand = &cobra.Command{
- Use: "delete ",
- Short: "delete a fileshare acl of specified fileshare in the cluster",
- Run: fileShareAclDeleteAction,
-}
-
-var fileShareAclShowCommand = &cobra.Command{
- Use: "show ",
- Short: "show a fileshare acl in the cluster",
- Run: fileShareAclShowAction,
-}
-
-var fileShareAclListCommand = &cobra.Command{
- Use: "list",
- Short: "list all fileshare acls in the cluster",
- Run: fileSharesAclListAction,
-}
-
-var (
- shareAclType string
- shareAclAccessCapability []string
- shareAclAccessTo string
- shareAclDesp string
-
- shareAclFormatters = FormatterList{"Metadata": JsonFormatter}
-)
-
-func init() {
- fileShareAclCommand.AddCommand(fileShareAclCreateCommand)
- fileShareAclCommand.AddCommand(fileShareAclDeleteCommand)
- fileShareAclCommand.AddCommand(fileShareAclShowCommand)
- fileShareAclCommand.AddCommand(fileShareAclListCommand)
-
- fileShareAclCreateCommand.Flags().StringVarP(&shareAclType, "type", "t", "", "the type of access. The Only current supported type is: ip")
- fileShareAclCreateCommand.Flags().StringSliceVarP(&shareAclAccessCapability, "capability", "c", shareAclAccessCapability, "the accessCapability \"Read\" or \"Write\" for fileshare")
- fileShareAclCreateCommand.Flags().StringVarP(&shareAclAccessTo, "accessTo", "a", "", "accessTo of the fileshare. A valid IPv4 format is supported")
- fileShareAclCreateCommand.Flags().StringVarP(&shareAclDesp, "description", "d", "", "the description of of the fileshare acl")
-}
-
-func fileShareAclAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-func fileShareAclCreateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- acl := &model.FileShareAclSpec{
- FileShareId: args[0],
- Type: shareAclType,
- AccessCapability: shareAclAccessCapability,
- AccessTo: shareAclAccessTo,
- Description: shareAclDesp,
- }
-
- resp, err := client.CreateFileShareAcl(acl)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-
- keys := KeyList{"Id", "CreatedAt", "TenantId", "FileShareId", "Metadata",
- "Type", "AccessCapability", "AccessTo", "Description"}
- PrintDict(resp, keys, shareAclFormatters)
-}
-
-func fileShareAclDeleteAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
-
- err := client.DeleteFileShareAcl(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-}
-
-func fileShareAclShowAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- resp, err := client.GetFileShareAcl(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-
- keys := KeyList{"Id", "CreatedAt", "UpdatedAt", "TenantId", "FileShareId",
- "Type", "AccessCapability", "AccessTo", "Description", "Metadata"}
- PrintDict(resp, keys, shareAclFormatters)
-}
-
-func fileSharesAclListAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
- resp, err := client.ListFileSharesAcl()
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-
- keys := KeyList{"Id", "FileShareId",
- "Type", "AccessCapability", "AccessTo", "Description"}
- PrintList(resp, keys, shareAclFormatters)
-}
diff --git a/osdsctl/cli/fileshareacl_test.go b/osdsctl/cli/fileshareacl_test.go
deleted file mode 100644
index 50c304c91..000000000
--- a/osdsctl/cli/fileshareacl_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "os"
- "os/exec"
- "testing"
-
- c "github.com/opensds/opensds/client"
-)
-
-func init() {
- client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
-}
-
-func TestFileShareAclAction(t *testing.T) {
- beCrasher := os.Getenv("BE_CRASHER")
-
- if beCrasher == "1" {
- var args []string
- fileShareAclAction(fileShareAclCommand, args)
-
- return
- }
-
- cmd := exec.Command(os.Args[0], "-test.run=TestFileShareAclAction")
- cmd.Env = append(os.Environ(), "BE_CRASHER=1")
- err := cmd.Run()
- e, ok := err.(*exec.ExitError)
-
- if ok && ("exit status 1" == e.Error()) {
- return
- }
-
- t.Fatalf("process ran with %s, want exit status 1", e.Error())
-}
-
-func TestFileShareAclCreateAction(t *testing.T) {
- var args []string
- args = append(args, "bd5b12a8-a101-11e7-941e-d77981b584d8")
- fileShareAclCreateAction(fileShareAclCreateCommand, args)
-}
-
-func TestFileShareAclDeleteAction(t *testing.T) {
- var args []string
- args = append(args, "d2975ebe-d82c-430f-b28e-f373746a71ca")
- fileShareAclDeleteAction(fileShareAclDeleteCommand, args)
-}
-
-func TestFileShareAclShowAction(t *testing.T) {
- var args []string
- args = append(args, "d2975ebe-d82c-430f-b28e-f373746a71ca")
- fileShareAclShowAction(fileShareAclShowCommand, args)
-}
-
-func TestFileShareAclListAction(t *testing.T) {
- var args []string
- fileSharesAclListAction(fileShareAclListCommand, args)
-}
diff --git a/osdsctl/cli/filesharesnapshot.go b/osdsctl/cli/filesharesnapshot.go
deleted file mode 100644
index 4359d5cba..000000000
--- a/osdsctl/cli/filesharesnapshot.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS service.
-
-*/
-
-package cli
-
-import (
- "os"
-
- "github.com/opensds/opensds/pkg/model"
- "github.com/spf13/cobra"
-)
-
-var fileShareSnapshotCommand = &cobra.Command{
- Use: "snapshot",
- Short: "manage fileshare snapshots in the cluster",
- Run: fileShareSnapshotAction,
-}
-
-var fileShareSnapshotCreateCommand = &cobra.Command{
- Use: "create ",
- Short: "create a snapshot of specified fileshare in the cluster",
- Run: fileShareSnapshotCreateAction,
-}
-
-var fileShareSnapshotShowCommand = &cobra.Command{
- Use: "show ",
- Short: "show a fileshare snapshot in the cluster",
- Run: fileShareSnapshotShowAction,
-}
-
-var fileShareSnapshotListCommand = &cobra.Command{
- Use: "list",
- Short: "list all fileshare snapshots in the cluster",
- Run: fileShareSnapshotListAction,
-}
-
-var fileShareSnapshotDeleteCommand = &cobra.Command{
- Use: "delete ",
- Short: "delete a fileshare snapshot of specified fileshare in the cluster",
- Run: fileShareSnapshotDeleteAction,
-}
-
-var fileShareSnapshotUpdateCommand = &cobra.Command{
- Use: "update ",
- Short: "update a fileshare snapshot in the cluster",
- Run: fileShareSnapshotUpdateAction,
-}
-
-var (
- shareSnapshotName string
- shareSnapshotDesp string
-)
-
-var (
- shareSnapLimit string
- shareSnapOffset string
- shareSnapSortDir string
- shareSnapSortKey string
- shareSnapID string
- shareSnapUserID string
- shareSnapName string
- shareSnapDesp string
- shareSnapStatus string
- shareSize string
- shareSnapSize string
- shareSnapTenantID string
- shareSnapProfileID string
- fileshareID string
- shareSnapCreatedAt string
- shareSnapUpdatedAt string
-
- shareSnapFormatters = FormatterList{"Metadata": JsonFormatter}
-)
-
-func init() {
- fileShareSnapshotCommand.AddCommand(fileShareSnapshotCreateCommand)
- fileShareSnapshotCommand.AddCommand(fileShareSnapshotDeleteCommand)
- fileShareSnapshotCommand.AddCommand(fileShareSnapshotShowCommand)
- fileShareSnapshotCommand.AddCommand(fileShareSnapshotListCommand)
- fileShareSnapshotCommand.AddCommand(fileShareSnapshotUpdateCommand)
-
- fileShareSnapshotCreateCommand.Flags().StringVarP(&shareSnapName, "name", "n", "", "the name of the fileshare snapshot")
- fileShareSnapshotCreateCommand.Flags().StringVarP(&shareSnapDesp, "description", "d", "", "the description of the fileshare snapshot")
- fileShareSnapshotCreateCommand.Flags().StringVarP(&shareSnapProfileID, "profileId", "p", "", "the uuid of the profile which the fileshare snapshot belongs to")
-
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapLimit, "limit", "", "50", "the number of ertries displayed per page")
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapOffset, "offset", "", "0", "all requested data offsets")
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapSortDir, "sortDir", "", "desc", "the sort direction of all requested data. supports asc or desc(default)")
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapSortKey, "sortKey", "", "id",
- "the sort key of all requested data. supports id(default), createdAt, updatedAt, name, description, snapshotSize, status, userId, tenantId, fileshareId, profileId")
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapID, "id", "", "", "list fileshare snapshot by id")
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapName, "name", "", "", "list fileshare snapshot by name")
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapDesp, "description", "", "", "list fileshare snapshot by description")
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapSize, "snapshotSize", "", "", "list fileshare snapshot by snapshotSize")
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapStatus, "status", "", "", "list fileshare snapshot by status")
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapUserID, "userId", "", "", "list fileshare snapshot by userId")
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapTenantID, "tenantId", "", "", "list fileshare snapshot by tenantId")
- fileShareSnapshotListCommand.Flags().StringVarP(&fileshareID, "fileshareId", "", "", "list fileshare snapshot by fileshareId")
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapProfileID, "profileId", "", "", "list fileshare snapshot by profileId")
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapCreatedAt, "createdAt", "", "", "list fileshare snapshot by createdAt")
- fileShareSnapshotListCommand.Flags().StringVarP(&shareSnapUpdatedAt, "updatedAt", "", "", "list fileshare snapshot by updatedAt")
-
- fileShareSnapshotUpdateCommand.Flags().StringVarP(&shareSnapshotName, "name", "n", "", "the name of the fileshare snapshot")
- fileShareSnapshotUpdateCommand.Flags().StringVarP(&shareSnapshotDesp, "description", "d", "", "the description of the fileshare snapshot")
-}
-
-func fileShareSnapshotAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-func fileShareSnapshotCreateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- snp := &model.FileShareSnapshotSpec{
- Name: shareSnapName,
- Description: shareSnapDesp,
- FileShareId: args[0],
- ProfileId: shareSnapProfileID,
- }
-
- resp, err := client.CreateFileShareSnapshot(snp)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-
- keys := KeyList{"Id", "CreatedAt", "Name", "Description", "ShareSize",
- "Status", "FileShareId", "SnapshotSize", "TenantId", "UserId",
- "ProfileId", "Metadata"}
- PrintDict(resp, keys, shareSnapFormatters)
-}
-
-func fileShareSnapshotShowAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- resp, err := client.GetFileShareSnapshot(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-
- keys := KeyList{"Id", "CreatedAt", "UpdatedAt", "Name", "Description",
- "ShareSize", "Status", "FileShareId", "SnapshotSize", "TenantId",
- "UserId", "ProfileId", "Metadata"}
- PrintDict(resp, keys, shareSnapFormatters)
-}
-
-func fileShareSnapshotListAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
-
- var opts = map[string]string{"limit": shareSnapLimit, "offset": shareSnapOffset, "sortDir": shareSnapSortDir,
- "sortKey": shareSnapSortKey, "Id": shareSnapID,
- "Name": shareSnapName, "Description": shareSnapDesp, "UserId": shareSnapUserID,
- "Status": shareSnapStatus, "CreatedAt": shareSnapCreatedAt, "UpdatedAt": shareSnapUpdatedAt,
- "Size": shareSnapSize, "TenantId": shareSnapTenantID, "FileShareId": fileshareID}
-
- resp, err := client.ListFileShareSnapshots(opts)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-
- keys := KeyList{"Id", "Name", "Description", "Status",
- "FileShareId", "ProfileId", "SnapshotSize"}
- PrintList(resp, keys, shareSnapFormatters)
-}
-
-func fileShareSnapshotDeleteAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
-
- err := client.DeleteFileShareSnapshot(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-}
-
-func fileShareSnapshotUpdateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- snp := &model.FileShareSnapshotSpec{
- Name: shareSnapshotName,
- Description: shareSnapshotDesp,
- }
-
- resp, err := client.UpdateFileShareSnapshot(args[0], snp)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-
- keys := KeyList{"Id", "UpdatedAt", "Name", "Description", "ShareSize",
- "Status", "FileShareId", "SnapshotSize", "TenantId", "UserId",
- "ProfileId", "Metadata"}
- PrintDict(resp, keys, shareSnapFormatters)
-}
diff --git a/osdsctl/cli/filesharesnapshot_test.go b/osdsctl/cli/filesharesnapshot_test.go
deleted file mode 100644
index e803cea33..000000000
--- a/osdsctl/cli/filesharesnapshot_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "os"
- "os/exec"
- "testing"
-
- c "github.com/opensds/opensds/client"
-)
-
-func init() {
- client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
-}
-
-func TestFileShareSnapshotAction(t *testing.T) {
- beCrasher := os.Getenv("BE_CRASHER")
-
- if beCrasher == "1" {
- var args []string
- fileShareSnapshotAction(fileShareSnapshotCommand, args)
-
- return
- }
-
- cmd := exec.Command(os.Args[0], "-test.run=TestFileShareSnapshotAction")
- cmd.Env = append(os.Environ(), "BE_CRASHER=1")
- err := cmd.Run()
- e, ok := err.(*exec.ExitError)
-
- if ok && ("exit status 1" == e.Error()) {
- return
- }
-
- t.Fatalf("process ran with %s, want exit status 1", e.Error())
-}
-
-func TestFileShareSnapshotCreateAction(t *testing.T) {
- var args []string
- args = append(args, "bd5b12a8-a101-11e7-941e-d77981b584d8")
- fileShareSnapshotCreateAction(fileShareSnapshotCreateCommand, args)
-}
-
-func TestFileShareSnapshotShowAction(t *testing.T) {
- var args []string
- args = append(args, "3769855c-a102-11e7-b772-17b880d2f537")
- fileShareSnapshotShowAction(fileShareSnapshotShowCommand, args)
-}
-
-func TestFileShareSnapshotListAction(t *testing.T) {
- var args []string
- fileShareSnapshotListAction(fileShareSnapshotListCommand, args)
-}
-
-func TestFileShareSnapshotDeleteAction(t *testing.T) {
- var args []string
- args = append(args, "3769855c-a102-11e7-b772-17b880d2f537")
- fileShareSnapshotDeleteAction(fileShareSnapshotDeleteCommand, args)
-}
-
-func TestFileShareSnapshotUpdateAction(t *testing.T) {
- var args []string
- args = append(args, "3769855c-a102-11e7-b772-17b880d2f537")
- fileShareSnapshotUpdateAction(fileShareSnapshotDeleteCommand, args)
-}
diff --git a/osdsctl/cli/host.go b/osdsctl/cli/host.go
deleted file mode 100644
index 1373df940..000000000
--- a/osdsctl/cli/host.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-/*
-This module implements a entry into the OpenSDS service.
-
-*/
-
-package cli
-
-import (
- "os"
-
- "github.com/opensds/opensds/pkg/model"
- "github.com/spf13/cobra"
-)
-
-var hostCommand = &cobra.Command{
- Use: "host",
- Short: "manage hosts in the cluster",
- Run: hostAction,
-}
-
-var hostCreateCommand = &cobra.Command{
- Use: "create ",
- Short: "create a host in the cluster",
- Run: hostCreateAction,
-}
-
-var hostShowCommand = &cobra.Command{
- Use: "show ",
- Short: "show a host in the cluster",
- Run: hostShowAction,
-}
-
-var hostListCommand = &cobra.Command{
- Use: "list",
- Short: "list all hosts in the cluster",
- Run: hostListAction,
-}
-
-var hostDeleteCommand = &cobra.Command{
- Use: "delete ",
- Short: "delete a host in the cluster",
- Run: hostDeleteAction,
-}
-
-var hostUpdateCommand = &cobra.Command{
- Use: "update ",
- Short: "update a host in the cluster",
- Run: hostUpdateAction,
-}
-
-var hostInitiatorCommand = &cobra.Command{
- Use: "initiator",
- Short: "manage initiators of host in the cluster",
- Run: hostInitiatorAction,
-}
-
-var hostAddInitiatorCommand = &cobra.Command{
- Use: "add ",
- Short: "add/update an initiator into a host in the cluster",
- Run: hostAddInitiatorAction,
-}
-
-var hostRemoveInitiatorCommand = &cobra.Command{
- Use: "remove ",
- Short: "remove an initiator from a host in the cluster",
- Run: hostRemoveInitiatorAction,
-}
-
-var (
- accessMode string
- hostName string
- osType string
- ip string
- availabilityZones []string
-)
-
-var (
- hostFormatters = FormatterList{"Initiators": JsonFormatter}
- keysForDetail = KeyList{"Id", "HostName", "OsType", "IP", "Port", "AccessMode", "Username",
- "AvailabilityZones", "Initiators", "CreatedAt", "UpdatedAt"}
- keysForSummary = KeyList{"Id", "HostName", "OsType", "IP", "AccessMode", "AvailabilityZones"}
-)
-
-func init() {
-
- hostCommand.AddCommand(hostCreateCommand)
- hostCommand.AddCommand(hostDeleteCommand)
- hostCommand.AddCommand(hostShowCommand)
- hostCommand.AddCommand(hostListCommand)
- hostCommand.AddCommand(hostUpdateCommand)
-
- hostCreateCommand.Flags().StringVarP(&accessMode, "accessMode", "", "agentless", "the access mode of host, including: agentless, agent")
- hostCreateCommand.Flags().StringVarP(&osType, "osType", "", "linux", "the os type of host, includding: linux, windows")
- hostCreateCommand.Flags().StringVarP(&ip, "ip", "", "", "the IP address for access the host")
- hostCreateCommand.Flags().StringSliceVarP(&availabilityZones, "availabilityZones", "", []string{"default"}, "the array of availability zones which host belongs to")
-
- hostUpdateCommand.Flags().StringVarP(&accessMode, "accessMode", "", "agentless", "the access mode of host, including: agentless, agent")
- hostUpdateCommand.Flags().StringVarP(&hostName, "hostName", "", "", "the host name of host")
- hostUpdateCommand.Flags().StringVarP(&osType, "osType", "", "linux", "the os type of host, includding: linux, windows")
- hostUpdateCommand.Flags().StringVarP(&ip, "ip", "", "", "the IP address for access the host")
- hostUpdateCommand.Flags().StringSliceVarP(&availabilityZones, "availabilityZones", "", []string{"default"}, "the array of availability zones which host belongs to")
-
- hostInitiatorCommand.AddCommand(hostAddInitiatorCommand)
- hostInitiatorCommand.AddCommand(hostRemoveInitiatorCommand)
- hostCommand.AddCommand(hostInitiatorCommand)
-}
-
-func hostAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-func hostInitiatorAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-func hostCreateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- host := &model.HostSpec{
- AccessMode: accessMode,
- HostName: args[0],
- OsType: osType,
- IP: ip,
- AvailabilityZones: availabilityZones,
- }
-
- resp, err := client.CreateHost(host)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- PrintDict(resp, keysForDetail, hostFormatters)
-}
-
-func hostShowAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- resp, err := client.GetHost(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- PrintDict(resp, keysForDetail, hostFormatters)
-}
-
-func hostListAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
- var opts = map[string]string{"hostName": hostName}
- resp, err := client.ListHosts(opts)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- PrintList(resp, keysForSummary, hostFormatters)
-}
-
-func hostDeleteAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- err := client.DeleteHost(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-}
-
-func hostUpdateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- host := &model.HostSpec{
- AccessMode: accessMode,
- HostName: hostName,
- OsType: osType,
- IP: ip,
- AvailabilityZones: availabilityZones,
- }
-
- resp, err := client.UpdateHost(args[0], host)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- PrintDict(resp, keysForDetail, hostFormatters)
-}
-
-func hostAddInitiatorAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 3)
- tmpHost, err := client.GetHost(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- var initiators []*model.Initiator
- for _, e := range tmpHost.Initiators {
- if args[1] == e.PortName {
- continue
- }
- initiators = append(initiators, e)
- }
- initiators = append(initiators, &model.Initiator{
- PortName: args[1],
- Protocol: args[2],
- })
-
- host := &model.HostSpec{
- Initiators: initiators,
- }
-
- resp, err := client.UpdateHost(args[0], host)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- PrintDict(resp, keysForDetail, hostFormatters)
-}
-
-func hostRemoveInitiatorAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 2)
- tmpHost, err := client.GetHost(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- var initiators []*model.Initiator
- for _, e := range tmpHost.Initiators {
- if args[1] == e.PortName {
- continue
- }
- initiators = append(initiators, e)
- }
-
- host := &model.HostSpec{
- Initiators: initiators,
- }
-
- resp, err := client.UpdateHost(args[0], host)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- PrintDict(resp, keysForDetail, hostFormatters)
-}
diff --git a/osdsctl/cli/host_test.go b/osdsctl/cli/host_test.go
deleted file mode 100644
index 2fb81c712..000000000
--- a/osdsctl/cli/host_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "os"
- "os/exec"
- "testing"
-
- c "github.com/opensds/opensds/client"
-)
-
-func init() {
- client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
-}
-
-func TestHostAction(t *testing.T) {
- beCrasher := os.Getenv("BE_CRASHER")
-
- if beCrasher == "1" {
- var args []string
- hostAction(hostCommand, args)
-
- return
- }
-
- cmd := exec.Command(os.Args[0], "-test.run=TestHostAction")
- cmd.Env = append(os.Environ(), "BE_CRASHER=1")
- err := cmd.Run()
- e, ok := err.(*exec.ExitError)
-
- if ok && ("exit status 1" == e.Error()) {
- return
- }
-
- t.Fatalf("process ran with %s, want exit status 1", e.Error())
-}
-
-func TestHostCreateAction(t *testing.T) {
- var args []string
- args = append(args, "sap1")
- hostCreateAction(hostCreateCommand, args)
-}
-
-func TestHostShowAction(t *testing.T) {
- var args []string
- args = append(args, "202964b5-8e73-46fd-b41b-a8e403f3c30b")
- hostShowAction(hostShowCommand, args)
-}
-
-func TestHostListAction(t *testing.T) {
- var args []string
- hostListAction(hostListCommand, args)
-}
-
-func TestHostDeleteAction(t *testing.T) {
- var args []string
- args = append(args, "202964b5-8e73-46fd-b41b-a8e403f3c30b")
- hostDeleteAction(hostDeleteCommand, args)
-}
-
-func TestHostUpdateAction(t *testing.T) {
- var args []string
- args = append(args, "202964b5-8e73-46fd-b41b-a8e403f3c30b")
- hostUpdateAction(hostDeleteCommand, args)
-}
-
-func TestHostAddInitiatorAction(t *testing.T) {
- var args []string
- args = append(args, "202964b5-8e73-46fd-b41b-a8e403f3c30b", "20000024ff5bb888", "iSCSI")
- hostAddInitiatorAction(hostAddInitiatorCommand, args)
-}
-
-func TestHostRemoveInitiatorAction(t *testing.T) {
- var args []string
- args = append(args, "202964b5-8e73-46fd-b41b-a8e403f3c30b", "20000024ff5bb888")
- hostRemoveInitiatorAction(hostAddInitiatorCommand, args)
-}
diff --git a/osdsctl/cli/pool.go b/osdsctl/cli/pool.go
deleted file mode 100644
index 84b6f9588..000000000
--- a/osdsctl/cli/pool.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-/*
-This module implements a entry into the OpenSDS service.
-
-*/
-
-package cli
-
-import (
- "os"
-
- "github.com/spf13/cobra"
-)
-
-var poolCommand = &cobra.Command{
- Use: "pool",
- Short: "manage OpenSDS pool resources",
- Run: poolAction,
-}
-
-var poolShowCommand = &cobra.Command{
- Use: "show ",
- Short: "show information of specified pool",
- Run: poolShowAction,
-}
-
-var poolListCommand = &cobra.Command{
- Use: "list",
- Short: "get all pool resources",
- Run: poolListAction,
-}
-
-var (
- poolLimit string
- poolOffset string
- poolSortDir string
- poolSortKey string
- poolId string
- poolName string
- poolDescription string
- poolStatus string
- poolDockId string
- poolAvailabilityZone string
- poolStorageType string
-)
-
-func init() {
- poolListCommand.Flags().StringVarP(&poolLimit, "limit", "", "50", "the number of ertries displayed per page")
- poolListCommand.Flags().StringVarP(&poolOffset, "offset", "", "0", "all requested data offsets")
- poolListCommand.Flags().StringVarP(&poolSortDir, "sortDir", "", "desc", "the sort direction of all requested data. supports asc or desc(default)")
- poolListCommand.Flags().StringVarP(&poolSortKey, "sortKey", "", "id", "the sort key of all requested data. supports id(default), name, status, availabilityzone, dock id, description")
- poolListCommand.Flags().StringVarP(&poolId, "id", "", "", "list pools by id")
- poolListCommand.Flags().StringVarP(&poolName, "name", "", "", "list pools by name")
- poolListCommand.Flags().StringVarP(&poolDescription, "description", "", "", "list pools by description")
- poolListCommand.Flags().StringVarP(&poolStatus, "status", "", "", "list pools by status")
- poolListCommand.Flags().StringVarP(&poolStorageType, "storageType", "", "", "list pools by storage type")
- poolListCommand.Flags().StringVarP(&poolDockId, "dockId", "", "", "list pools by dock id")
- poolListCommand.Flags().StringVarP(&poolAvailabilityZone, "availabilityZone", "", "", "list pools by availability zone")
-
- poolCommand.AddCommand(poolShowCommand)
- poolCommand.AddCommand(poolListCommand)
-}
-
-func poolAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-var poolFormatters = FormatterList{"Extras": JsonFormatter}
-
-func poolShowAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- pols, err := client.GetPool(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "CreatedAt", "UpdatedAt", "Name", "Description", "Status", "DockId",
- "AvailabilityZone", "TotalCapacity", "FreeCapacity", "StorageType", "MultiAttach", "Extras"}
- PrintDict(pols, keys, poolFormatters)
-}
-
-func poolListAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
-
- var opts = map[string]string{"limit": poolLimit, "offset": poolOffset, "sortDir": poolSortDir,
- "sortKey": poolSortKey, "Id": poolId,
- "Name": poolName, "Description": poolDescription, "AvailabilityZone": poolAvailabilityZone,
- "Status": poolStatus,
- "DockId": poolDockId, "StorageType": poolStorageType}
-
- pols, err := client.ListPools(opts)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "Name", "Description", "Status", "TotalCapacity", "FreeCapacity"}
- PrintList(pols, keys, poolFormatters)
-}
diff --git a/osdsctl/cli/pool_test.go b/osdsctl/cli/pool_test.go
deleted file mode 100644
index bbb2ebee3..000000000
--- a/osdsctl/cli/pool_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "os"
- "os/exec"
- "testing"
-
- c "github.com/opensds/opensds/client"
-)
-
-func init() {
- client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
-}
-
-func TestPoolAction(t *testing.T) {
- beCrasher := os.Getenv("BE_CRASHER")
-
- if beCrasher == "1" {
- var args []string
- poolAction(dockCommand, args)
-
- return
- }
-
- cmd := exec.Command(os.Args[0], "-test.run=TestPoolAction")
- cmd.Env = append(os.Environ(), "BE_CRASHER=1")
- err := cmd.Run()
- e, ok := err.(*exec.ExitError)
-
- if ok && ("exit status 1" == e.Error()) {
- return
- }
-
- t.Fatalf("process ran with %s, want exit status 1", e.Error())
-}
-
-func TestPoolShowAction(t *testing.T) {
- var args []string
- args = append(args, "084bf71e-a102-11e7-88a8-e31fe6d52248")
- poolShowAction(poolShowCommand, args)
-}
-
-func TestPoolListAction(t *testing.T) {
- var args []string
- poolListAction(dockListCommand, args)
-}
diff --git a/osdsctl/cli/profile.go b/osdsctl/cli/profile.go
deleted file mode 100644
index 6a7f06aad..000000000
--- a/osdsctl/cli/profile.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS service.
-*/
-
-package cli
-
-import (
- "encoding/json"
- "os"
-
- "github.com/opensds/opensds/pkg/model"
- "github.com/spf13/cobra"
-)
-
-var profileCommand = &cobra.Command{
- Use: "profile",
- Short: "manage OpenSDS profile resources",
- Run: profileAction,
-}
-
-var profileCreateCommand = &cobra.Command{
- Use: "create ",
- Example: "osdsctl profile create '{\"name\": \"default_block\", \"description\": \"default policy\", \"storageType\": \"block\"}'" + "\n" +
- "osdsctl profile create '{\"name\": \"default_file\", \"description\": \"default policy\", \"storageType\": \"file\", \"provisioningProperties\":{\"ioConnectivity\": {\"accessProtocol\": \"NFS\"},\"DataStorage\":{\"StorageAccessCapability\":[\"Read\",\"Write\",\"Execute\"]}}}'" +
- "\n" +
- "\n" +
- "The example of more supported \"profile-info\" parameters:\n" +
- "\n" +
- "'{" +
- "\"name\": \"File_Profile\"," +
- "\"storageType\": \"block\"," +
- "\"description\": \"string\"," +
- "\"provisioningProperties\": {" +
- "\"dataStorage\": {" +
- "\"recoveryTimeObjective\": 10," +
- "\"provisioningPolicy\": \"Thick\"," +
- "\"compression\": false," +
- "\"deduplication\": false," +
- "\"characterCodeSet\": \"ASCII\"," +
- "\"maxFileNameLengthBytes\": 255," +
- "\"storageAccessCapability\": [\"Read\"] " +
- "}," +
- "\"ioConnectivity\": {" +
- "\"accessProtocol\": \"iscsi\"," +
- "\"maxIOPS\": 150," +
- "\"minIOPS\": 50," +
- "\"maxBWS\": 5," +
- "\"minBWS\": 1," +
- "\"latency\": 1" +
- "}" +
- "}," +
- "\"replicationProperties\": {" +
- "\"dataProtection\": {" +
- "\"isIsolated\": true," +
- "\"minLifetime\": \"P3Y6M4DT12H30M55\"," +
- "\"RecoveryGeographicObjective\": \"datacenter\"," +
- "\"RecoveryPointObjectiveTime\": \"P3Y6M4DT12H30M5S\"," +
- "\"RecoveryTimeObjective\": \"offline\"," +
- "\"ReplicaType\": \"snapshot\"" +
- "}," +
- "\"replicaInfos\": {" +
- "\"replicaUpdateMode\": \"Active\"," +
- "\"replcationBandwidth\": 5," +
- "\"replicationPeriod\": \"P3Y6M4DT12H30M5S\"," +
- "\"consistencyEnalbed\": true " +
- "}" +
- "}," +
- "\"snapshotProperties\": {" +
- "\"schedule\": {" +
- "\"datetime\": \"2019-09-07T07:02:35.389\"," +
- "\"occurrence\": \"Daily\"" +
- "}," +
- "\"retention\": {" +
- "\"duration\": 15," +
- "\"number\": 10" +
- "}," +
- "\"topology\": {" +
- "\"bucket\": \"string\"" +
- " }" +
- "}," +
- "\"dataProtectionProperties\": {" +
- "\"dataProtection\": {" +
- "\"isIsolated\": true," +
- "\"minLifetime\": \"P3Y6M4DT12H30M5S\"," +
- "\"RecoveryGeographicObjective\": \"datacenter\"," +
- "\"RecoveryPointObjectiveTime\": \"P3Y6M4DT12H30M5S\"," +
- "\"RecoveryTimeObjective\": \"offline\"," +
- "\"ReplicaType\": \"snapshot\"" +
- "}," +
- "\"consistencyEnalbed\": true " +
- "}," +
- "\"customProperties\": {" +
- "\"key1\": \"value1\"," +
- "\"key2\": false, " +
- "\"key3\": { " +
- "\"key31\": \"value31\"" +
- "}" +
- "}" +
- "}'",
-
- Short: "create a new profile resource",
- Run: profileCreateAction,
-}
-
-var profileShowCommand = &cobra.Command{
- Use: "show ",
- Short: "show information of specified profile",
- Run: profileShowAction,
-}
-
-var profileListCommand = &cobra.Command{
- Use: "list",
- Short: "get all profile resources",
- Example: "osdsctl profile list --description \"test\"\n" +
- "osdsctl profile list --id def32d39-78e2-47f3-9e2f-8c43a8b9ee3a\n" +
- "osdsctl profile list --limit 2\n" +
- "osdsctl profile list --name test\n" +
- "osdsctl profile list --offset 2\n" +
- "osdsctl profile list --sortDir desc\n" +
- "osdsctl profile list --sortKey id\n" +
- "osdsctl profile list --storageType block\n",
- Run: profileListAction,
-}
-
-var profileDeleteCommand = &cobra.Command{
- Use: "delete ",
- Short: "delete a specified profile resource",
- Run: profileDeleteAction,
-}
-
-var (
- profLimit string
- profOffset string
- profSortDir string
- profSortKey string
- profId string
- profName string
- profDescription string
- profStorageType string
- profInfo string
-)
-
-func init() {
- profileListCommand.Flags().StringVarP(&profLimit, "limit", "", "50", "the number of ertries displayed per page")
- profileListCommand.Flags().StringVarP(&profOffset, "offset", "", "0", "all requested data offsets")
- profileListCommand.Flags().StringVarP(&profSortDir, "sortDir", "", "desc", "the sort direction of all requested data. supports asc or desc(default)")
- profileListCommand.Flags().StringVarP(&profSortKey, "sortKey", "", "id", "the sort key of all requested data. supports id(default), name, description")
- profileListCommand.Flags().StringVarP(&profId, "id", "", "", "list profile by id")
- profileListCommand.Flags().StringVarP(&profName, "name", "", "", "list profile by name")
- profileListCommand.Flags().StringVarP(&profDescription, "description", "", "", "list profile by description")
- profileListCommand.Flags().StringVarP(&profStorageType, "storageType", "", "", "list profile by storage type")
-
- profileCommand.AddCommand(profileCreateCommand)
- profileCreateCommand.Flags().Lookup("profile-info")
- profileCommand.AddCommand(profileShowCommand)
- profileCommand.AddCommand(profileListCommand)
- profileCommand.AddCommand(profileDeleteCommand)
-}
-
-func profileAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-var profileFormatters = FormatterList{"ProvisioningProperties": JsonFormatter, "ReplicationProperties": JsonFormatter,
- "SnapshotProperties": JsonFormatter, "DataProtectionProperties": JsonFormatter, "CustomProperties": JsonFormatter}
-
-func profileCreateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- prf := &model.ProfileSpec{}
- if err := json.Unmarshal([]byte(args[0]), prf); err != nil {
- Errorln(err)
- cmd.Usage()
- os.Exit(1)
- }
-
- resp, err := client.CreateProfile(prf)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "CreatedAt", "Name", "Description", "StorageType", "ProvisioningProperties",
- "ReplicationProperties", "SnapshotProperties", "DataProtectionProperties", "CustomProperties"}
- PrintDict(resp, keys, profileFormatters)
-}
-
-func profileShowAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- resp, err := client.GetProfile(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "CreatedAt", "UpdatedAt", "Name", "Description", "StorageType", "ProvisioningProperties",
- "ReplicationProperties", "SnapshotProperties", "DataProtectionProperties", "CustomProperties"}
- PrintDict(resp, keys, profileFormatters)
-}
-
-func profileListAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
- var opts = map[string]string{"limit": profLimit, "offset": profOffset, "sortDir": profSortDir,
- "sortKey": profSortKey, "Id": profId,
- "Name": profName, "Description": profDescription, "StorageType": profStorageType}
-
- resp, err := client.ListProfiles(opts)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "Name", "Description", "StorageType"}
- PrintList(resp, keys, FormatterList{})
-}
-
-func profileDeleteAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- err := client.DeleteProfile(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-}
diff --git a/osdsctl/cli/profile_test.go b/osdsctl/cli/profile_test.go
deleted file mode 100644
index 8fcfe6b51..000000000
--- a/osdsctl/cli/profile_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "os"
- "os/exec"
- "testing"
-
- c "github.com/opensds/opensds/client"
- . "github.com/opensds/opensds/testutils/collection"
-)
-
-func init() {
- client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
-}
-
-func TestProfileAction(t *testing.T) {
- beCrasher := os.Getenv("BE_CRASHER")
-
- if beCrasher == "1" {
- var args []string
- profileAction(profileCommand, args)
-
- return
- }
-
- cmd := exec.Command(os.Args[0], "-test.run=TestProfileAction")
- cmd.Env = append(os.Environ(), "BE_CRASHER=1")
- err := cmd.Run()
- e, ok := err.(*exec.ExitError)
-
- if ok && ("exit status 1" == e.Error()) {
- return
- }
-
- t.Fatalf("process ran with %s, want exit status 1", e.Error())
-}
-
-func TestProfileCreateAction(t *testing.T) {
- var args []string
- args = append(args, ByteProfile)
- profileCreateAction(profileCreateCommand, args)
-}
-
-func TestProfileShowAction(t *testing.T) {
- var args []string
- args = append(args, "1106b972-66ef-11e7-b172-db03f3689c9c")
- profileShowAction(profileShowCommand, args)
-}
-
-func TestProfileListAction(t *testing.T) {
- var args []string
- profileListAction(profileListCommand, args)
-}
-
-func TestProfileDeleteAction(t *testing.T) {
- var args []string
- args = append(args, "1106b972-66ef-11e7-b172-db03f3689c9c")
- profileDeleteAction(profileDeleteCommand, args)
-}
diff --git a/osdsctl/cli/replication.go b/osdsctl/cli/replication.go
deleted file mode 100644
index 9355452a7..000000000
--- a/osdsctl/cli/replication.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS service.
-
-*/
-
-package cli
-
-import (
- "encoding/json"
- "os"
- "strings"
-
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/spf13/cobra"
-)
-
-var replicationCommand = &cobra.Command{
- Use: "replication",
- Short: "manage replications in the cluster",
- Run: replicationAction,
-}
-
-var replicationCreateCommand = &cobra.Command{
- Use: "create ",
- Short: "create a replication of specified volumes in the cluster",
- Run: replicationCreateAction,
-}
-
-var replicationShowCommand = &cobra.Command{
- Use: "show ",
- Short: "show a replication in the cluster",
- Run: replicationShowAction,
-}
-
-var replicationListCommand = &cobra.Command{
- Use: "list",
- Short: "list all replications in the cluster",
- Run: replicationListAction,
-}
-
-var replicationDeleteCommand = &cobra.Command{
- Use: "delete ",
- Short: "delete a replication in the cluster",
- Run: replicationDeleteAction,
-}
-
-var replicationUpdateCommand = &cobra.Command{
- Use: "update ",
- Short: "update a replication in the cluster",
- Run: replicationUpdateAction,
-}
-var replicationEnableCommand = &cobra.Command{
- Use: "enable ",
- Short: "enable a replication in the cluster",
- Run: replicationEnableAction,
-}
-
-var replicationDisableCommand = &cobra.Command{
- Use: "disable ",
- Short: "disable a replication in the cluster",
- Run: replicationDisableAction,
-}
-
-var replicationFailoverCommand = &cobra.Command{
- Use: "failover ",
- Short: "failover a replication in the cluster",
- Run: replicationFailoverAction,
-}
-
-var (
- replicationName string
- replicationDesp string
- primaryReplicationDriverData string
- secondaryReplicationDriverData string
- replicationMode string
- replicationPeriod int64
- allowAttachedVolume bool
- secondaryBackendId string
-)
-
-var (
- repLimit string
- repOffset string
- repSortDir string
- repSortKey string
- repId string
- repName string
- repDesp string
- repPrimaryVolumeId string
- repSecondaryVolumeId string
-)
-
-func init() {
- replicationListCommand.Flags().StringVarP(&repLimit, "limit", "", "50", "the number of ertries displayed per page")
- replicationListCommand.Flags().StringVarP(&repOffset, "offset", "", "0", "all requested data offsets")
- replicationListCommand.Flags().StringVarP(&repSortDir, "sortDir", "", "desc", "the sort direction of all requested data. supports asc or desc(default)")
- replicationListCommand.Flags().StringVarP(&repSortKey, "sortKey", "", "id",
- "the sort key of all requested data. supports id(default), name, primaryVolumeId, secondaryVolumeId, description, create time, updatetime")
- replicationListCommand.Flags().StringVarP(&repId, "id", "", "", "list replication by id")
- replicationListCommand.Flags().StringVarP(&repName, "name", "", "", "list replication by name")
- replicationListCommand.Flags().StringVarP(&repDesp, "description", "", "", "list replication by description")
- replicationListCommand.Flags().StringVarP(&repPrimaryVolumeId, "primaryVolumeId", "", "", "list replication by PrimaryVolumeId")
- replicationListCommand.Flags().StringVarP(&repSecondaryVolumeId, "secondaryVolumeId", "", "", "list replication by storage userId")
-
- replicationCommand.AddCommand(replicationCreateCommand)
- flags := replicationCreateCommand.Flags()
- flags.StringVarP(&replicationName, "name", "n", "", "the name of created replication")
- flags.StringVarP(&replicationDesp, "description", "d", "", "the description of created replication")
- flags.StringVarP(&primaryReplicationDriverData, "primary_driver_data", "", "", "the primary replication driver data of created replication")
- flags.StringVarP(&secondaryReplicationDriverData, "secondary_driver_data", "", "", "the secondary replication driver data of created replication")
- flags.StringVarP(&replicationMode, "replication_mode", "m", model.ReplicationModeSync, "the replication mode of created replication, value can be sync/async")
- flags.Int64VarP(&replicationPeriod, "replication_period", "t", 0, "the replication period(minute) of created replication, the value must greater than 0, only in sync replication mode should set this value (default 60)")
- replicationUpdateCommand.Flags().StringVarP(&replicationName, "name", "n", "", "the name of updated replication")
- replicationUpdateCommand.Flags().StringVarP(&replicationDesp, "description", "d", "", "the description of updated replication")
- // TODO: Add some other update items, such as status, replication_period ... etc.
- replicationFailoverCommand.Flags().BoolVarP(&allowAttachedVolume, "allow_attached_volume", "a", false, "whether allow attached volume when failing over replication")
- replicationFailoverCommand.Flags().StringVarP(&secondaryBackendId, "secondary_backend_id", "s", model.ReplicationDefaultBackendId, "the secondary backend id of failoverr replication")
- replicationCommand.AddCommand(replicationShowCommand)
- replicationCommand.AddCommand(replicationListCommand)
- replicationCommand.AddCommand(replicationDeleteCommand)
- replicationCommand.AddCommand(replicationUpdateCommand)
- replicationCommand.AddCommand(replicationEnableCommand)
- replicationCommand.AddCommand(replicationDisableCommand)
- replicationCommand.AddCommand(replicationFailoverCommand)
-}
-
-func replicationAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-var replicationFormatters = FormatterList{"PrimaryReplicationDriverData": JsonFormatter,
- "SecondaryReplicationDriverData": JsonFormatter}
-
-func replicationCreateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 2)
- validMode := []string{model.ReplicationModeSync, model.ReplicationModeAsync}
- var mode = strings.ToLower(replicationMode)
- if !utils.Contained(mode, validMode) {
- Fatalf("invalid replication mode '%s'\n", replicationMode)
- }
-
- prdd := map[string]string{}
- if len(primaryReplicationDriverData) != 0 {
- if err := json.Unmarshal([]byte(primaryReplicationDriverData), &prdd); err != nil {
- Debugln(err)
- Fatalln("invalid replication primary driver data")
- }
- }
-
- srdd := map[string]string{}
- if len(secondaryReplicationDriverData) != 0 {
- if err := json.Unmarshal([]byte(secondaryReplicationDriverData), &prdd); err != nil {
- Debugln(err)
- Fatalln("invalid replication secondary driver data")
- }
- }
-
- switch {
- case replicationPeriod < 0:
- Fatalf("invalid replication period '%d'\n", replicationPeriod)
- case replicationPeriod != 0 && replicationMode == model.ReplicationModeSync:
- Fatalf("no need to set replication_period when the replication mode is 'sync'\n")
- case replicationPeriod != 0:
- break
- case replicationPeriod == 0 && replicationMode == model.ReplicationModeAsync:
- replicationPeriod = model.ReplicationDefaultPeriod
- }
-
- replica := &model.ReplicationSpec{
- Name: replicationName,
- Description: replicationDesp,
- PrimaryVolumeId: args[0],
- SecondaryVolumeId: args[1],
- PrimaryReplicationDriverData: prdd,
- SecondaryReplicationDriverData: srdd,
- ReplicationMode: mode,
- ReplicationPeriod: replicationPeriod,
- }
-
- resp, err := client.CreateReplication(replica)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "CreatedAt", "Name", "Description", "AvailabilityZone",
- "PrimaryVolumeId", "SecondaryVolumeId", "PrimaryReplicationDriverData", "SecondaryReplicationDriverData",
- "ReplicationStatus", "ReplicationMode", "ReplicationPeriod", "ProfileId"}
- PrintDict(resp, keys, replicationFormatters)
-}
-
-func replicationShowAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- resp, err := client.GetReplication(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "CreatedAt", "UpdatedAt", "Name", "Description", "AvailabilityZone",
- "PrimaryVolumeId", "SecondaryVolumeId", "PrimaryReplicationDriverData", "SecondaryReplicationDriverData",
- "ReplicationStatus", "ReplicationMode", "ReplicationPeriod", "ProfileId"}
- PrintDict(resp, keys, replicationFormatters)
-}
-
-func replicationListAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
-
- var opts = map[string]string{"limit": repLimit, "offset": repOffset, "sortDir": repSortDir,
- "sortKey": repSortKey, "Id": repId,
- "Name": repName, "Description": repDesp, "PrimaryVolumeId": repPrimaryVolumeId,
- "SecondaryVolumeId": repSecondaryVolumeId}
-
- resp, err := client.ListReplications(opts)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "Name", "Description", "PrimaryVolumeId", "SecondaryVolumeId",
- "ReplicationStatus", "ReplicationMode"}
- PrintList(resp, keys, FormatterList{})
-}
-
-func replicationUpdateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- replica := &model.ReplicationSpec{
- Name: replicationName,
- Description: replicationDesp,
- }
-
- resp, err := client.UpdateReplication(args[0], replica)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "UpdatedAt", "Name", "Description", "AvailabilityZone",
- "PrimaryVolumeId", "SecondaryVolumeId", "PrimaryReplicationDriverData", "SecondaryReplicationDriverData",
- "ReplicationStatus", "ReplicationMode", "ReplicationPeriod", "ProfileId"}
- PrintDict(resp, keys, replicationFormatters)
-}
-
-func replicationDeleteAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- replicaId := args[0]
- err := client.DeleteReplication(replicaId, nil)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-}
-
-func replicationEnableAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- replicaId := args[0]
- err := client.EnableReplication(replicaId)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-}
-
-func replicationDisableAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- replicaId := args[0]
- err := client.DisableReplication(replicaId)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-}
-
-func replicationFailoverAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- replicaId := args[0]
- failoverReplication := &model.FailoverReplicationSpec{
- AllowAttachedVolume: allowAttachedVolume,
- SecondaryBackendId: secondaryBackendId,
- }
- err := client.FailoverReplication(replicaId, failoverReplication)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-}
diff --git a/osdsctl/cli/replication_test.go b/osdsctl/cli/replication_test.go
deleted file mode 100644
index 7acdfd94c..000000000
--- a/osdsctl/cli/replication_test.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "os"
- "os/exec"
- "testing"
-
- c "github.com/opensds/opensds/client"
-)
-
-func init() {
- client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
-}
-
-func TestReplicationAction(t *testing.T) {
- beCrasher := os.Getenv("BE_CRASHER")
-
- if beCrasher == "1" {
- volumeAttachmentAction(volumeAttachmentCommand, []string{})
- return
- }
-
- cmd := exec.Command(os.Args[0], "-test.run=TestReplicationAction")
- cmd.Env = append(os.Environ(), "BE_CRASHER=1")
- err := cmd.Run()
- e, ok := err.(*exec.ExitError)
-
- if ok && ("exit status 1" == e.Error()) {
- return
- }
-
- t.Fatalf("process ran with %s, want exit status 1", e.Error())
-}
-
-func TestReplicationCreateAction(t *testing.T) {
- var args = []string{
- "3fc90eda-4ef6-410d-b1b9-f39c6476683d",
- "e0bfd484-0a95-429a-9065-2a797f673d0d",
- }
- replicationCreateAction(replicationCreateCommand, args)
-}
-
-func TestReplicationShowAction(t *testing.T) {
- var args = []string{"f2dda3d2-bf79-11e7-8665-f750b088f63e"}
- replicationShowAction(replicationShowCommand, args)
-}
-
-func TestReplicationListAction(t *testing.T) {
- var args []string
- replicationListAction(replicationListCommand, args)
-}
-
-func TestReplicationDeleteAction(t *testing.T) {
- var args = []string{"f2dda3d2-bf79-11e7-8665-f750b088f63e"}
- replicationDeleteAction(replicationDeleteCommand, args)
-}
-
-func TestReplicationUpdateAction(t *testing.T) {
- var args = []string{"f2dda3d2-bf79-11e7-8665-f750b088f63e"}
- replicationUpdateAction(replicationUpdateCommand, args)
-}
-
-func TestReplicationEnableAction(t *testing.T) {
- var args = []string{"f2dda3d2-bf79-11e7-8665-f750b088f63e"}
- replicationEnableAction(replicationEnableCommand, args)
-}
-func TestReplicationDisableAction(t *testing.T) {
- var args = []string{"f2dda3d2-bf79-11e7-8665-f750b088f63e"}
- replicationDisableAction(replicationDisableCommand, args)
-}
-func TestReplicationFailoverAction(t *testing.T) {
- var args = []string{"f2dda3d2-bf79-11e7-8665-f750b088f63e"}
- replicationFailoverAction(replicationFailoverCommand, args)
-}
diff --git a/osdsctl/cli/table.go b/osdsctl/cli/table.go
deleted file mode 100644
index 10da1b338..000000000
--- a/osdsctl/cli/table.go
+++ /dev/null
@@ -1,314 +0,0 @@
-// This source file has been modified by The OpenSDS Authors.
-// Copyright 2017 The OpenSDS Authors.
-
-// Copyright 2017 modood. All rights reserved.
-// license that can be found in the LICENSE file.
-
-// Package table produces a string that represents slice of structs data in a text table
-
-package cli
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "reflect"
- "strings"
-
- "github.com/opensds/opensds/pkg/model"
-)
-
-type bd struct {
- H rune // BOX DRAWINGS HORIZONTAL
- V rune // BOX DRAWINGS VERTICAL
- VH rune // BOX DRAWINGS VERTICAL AND HORIZONTAL
- HU rune // BOX DRAWINGS HORIZONTAL AND UP
- HD rune // BOX DRAWINGS HORIZONTAL AND DOWN
- VL rune // BOX DRAWINGS VERTICAL AND LEFT
- VR rune // BOX DRAWINGS VERTICAL AND RIGHT
- DL rune // BOX DRAWINGS DOWN AND LEFT
- DR rune // BOX DRAWINGS DOWN AND RIGHT
- UL rune // BOX DRAWINGS UP AND LEFT
- UR rune // BOX DRAWINGS UP AND RIGHT
-}
-
-type FormatterList map[string]func(v interface{}) string
-type KeyList []string
-type StructElemCb func(name string, value reflect.Value) error
-
-var m = bd{'-', '|', '+', '+', '+', '+', '+', '+', '+', '+', '+'}
-
-func JsonFormatter(v interface{}) string {
- buf := bytes.NewBuffer([]byte{})
- enc := json.NewEncoder(buf)
- enc.SetEscapeHTML(false)
- enc.SetIndent("", strings.Repeat(" ", 2))
- enc.Encode(v)
- return buf.String()
-}
-
-// Output formats slice of structs data and writes to standard output.(Using box drawing characters)
-func PrintList(slice interface{}, keys KeyList, fmts FormatterList) {
- fmt.Println(TableList(slice, keys, fmts))
-}
-
-func PrintDict(u interface{}, keys KeyList, fmts FormatterList) {
- fmt.Println(TableDict(u, keys, fmts))
-}
-
-// Table formats slice of structs data and returns the resulting string.(Using box drawing characters)
-func TableList(slice interface{}, keys KeyList, fmts FormatterList) string {
- coln, colw, rows := parseList(slice, keys, fmts)
- table := table(coln, colw, rows, m)
- return table
-}
-
-// Table formats slice of structs data and returns the resulting string.(Using standard ascii characters)
-func TableDict(u interface{}, keys KeyList, fmts FormatterList) string {
- coln, colw, rows := parseDict(u, keys, fmts)
- table := table(coln, colw, rows, m)
- return table
-}
-
-func slice2map(slice []string) map[string]int {
- m := make(map[string]int)
- // increment map's value for every key from slice
- for _, s := range slice {
- m[s]++
- }
- return m
-}
-
-func visitStructElem(u interface{}, keys KeyList, fn StructElemCb) {
- v := reflect.ValueOf(u)
- t := reflect.TypeOf(u)
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- t = t.Elem()
- }
- if v.Kind() != reflect.Struct {
- panic("Table: items of slice should be on struct value")
- }
-
- whiteList := slice2map(keys)
- m := 0 // count of unexported field
- for n := 0; n < v.NumField(); n++ {
- if t.Field(n).PkgPath != "" {
- m++
- continue
- }
-
- cn := t.Field(n).Name
- if _, ok := whiteList[cn]; !ok {
- m++
- continue
- }
- if err := fn(cn, v); err != nil {
- panic(fmt.Sprintln("Table:", err))
- }
- }
-}
-
-func appendRow(rows [][]string, row []string) [][]string {
- maxRowNum := 0
- var items [][]string
- for _, v := range row {
- lines := strings.Split(v, "\n")
- if maxRowNum < len(lines) {
- maxRowNum = len(lines)
- }
- items = append(items, lines)
- }
- for i := 0; i < maxRowNum; i++ {
- var r []string
- for _, v := range items {
- if len(v) <= i {
- r = append(r, "")
- } else {
- r = append(r, v[i])
- }
- }
- rows = append(rows, r)
- }
- return rows
-}
-
-func getRow(u interface{}, keys KeyList, fmts FormatterList) (
- row []string, // rows of content
-) {
- visitStructElem(u, keys, func(name string, value reflect.Value) error {
- var cv string
- if fn, ok := fmts[name]; ok {
- cv = fn(value.FieldByName(name).Interface())
- } else {
- cv = fmt.Sprintf("%+v", value.FieldByName(name).Interface())
- }
- row = append(row, cv)
- return nil
- })
- return
-}
-
-func getHead(u interface{}, keys KeyList) (
- n []string, // rows of content
-) {
- visitStructElem(u, keys, func(name string, value reflect.Value) error {
- n = append(n, name)
- return nil
- })
- return
-}
-
-func mergeStrSlice(s ...[]string) (slice []string) {
- switch len(s) {
- case 0:
- break
- case 1:
- slice = s[0]
- break
- default:
- s1 := s[0]
- s2 := mergeStrSlice(s[1:]...)
- slice = make([]string, len(s1)+len(s2))
- copy(slice, s1)
- copy(slice[len(s1):], s2)
- break
- }
- return
-}
-
-func getColw(head []string, rows [][]string) (colw []int) {
- for _, v := range head {
- colw = append(colw, len(v))
- }
- for _, row := range rows {
- for i, v := range row {
- if colw[i] < len(v) {
- colw[i] = len(v)
- }
- }
- }
- return
-}
-
-func parseDict(u interface{}, keys KeyList, fmts FormatterList) (
- coln []string, // name of columns
- colw []int, // width of columns
- rows [][]string, // rows of content
-) {
- v := reflect.ValueOf(u)
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
-
- bv := v.FieldByName("BaseModel")
- if bv.Kind() != reflect.Invalid {
- bm := bv.Interface().(*model.BaseModel)
- bmHead := getHead(bm, keys)
- bmRow := getRow(bm, keys, fmts)
- for i := 0; i < len(bmHead); i++ {
- rows = appendRow(rows, []string{bmHead[i], bmRow[i]})
- }
- }
-
- head := getHead(u, keys)
- row := getRow(u, keys, fmts)
- for i := 0; i < len(head); i++ {
- rows = appendRow(rows, []string{head[i], row[i]})
- }
- coln = []string{"Property", "Value"}
- colw = getColw(coln, rows)
- return coln, colw, rows
-}
-
-func sliceconv(slice interface{}) []interface{} {
- v := reflect.ValueOf(slice)
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
- if v.Kind() != reflect.Slice {
- panic("sliceconv: param \"slice\" should be on slice value")
- }
- l := v.Len()
- r := make([]interface{}, l)
- for i := 0; i < l; i++ {
- r[i] = v.Index(i).Interface()
- }
- return r
-}
-
-func parseList(slice interface{}, keys KeyList, fmts FormatterList) (
- coln []string, // name of columns
- colw []int, // width of columns
- rows [][]string, // rows of content
-) {
- for _, u := range sliceconv(slice) {
- v := reflect.ValueOf(u)
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
- head := getHead(u, keys)
- row := getRow(u, keys, fmts)
-
- bv := v.FieldByName("BaseModel")
- if bv.Kind() != reflect.Invalid {
- bm := bv.Interface().(*model.BaseModel)
- bmHead := getHead(bm, keys)
- bmRow := getRow(bm, keys, fmts)
- coln = mergeStrSlice(bmHead, head)
- rows = appendRow(rows, mergeStrSlice(bmRow, row))
- } else {
- coln = head
- rows = appendRow(rows, row)
- }
- }
- if len(coln) == 0 {
- coln = keys
- }
- colw = getColw(coln, rows)
- return coln, colw, rows
-}
-
-func repeat(time int, char rune) string {
- var s = make([]rune, time)
- for i := range s {
- s[i] = char
- }
- return string(s)
-}
-
-func table(coln []string, colw []int, rows [][]string, b bd) (table string) {
- head := [][]rune{{b.DR}, {b.V}, {b.VR}}
- bttm := []rune{b.UR}
- for i, v := range colw {
- head[0] = append(head[0], []rune(repeat(v+2, b.H)+string(b.HD))...)
- head[1] = append(head[1], []rune(" "+coln[i]+repeat(v-len(coln[i])+1, ' ')+string(b.V))...)
- head[2] = append(head[2], []rune(repeat(v+2, b.H)+string(b.VH))...)
- bttm = append(bttm, []rune(repeat(v+2, b.H)+string(b.HU))...)
- }
- head[0][len(head[0])-1] = b.DL
- head[2][len(head[2])-1] = b.VL
- bttm[len(bttm)-1] = b.UL
-
- var body [][]rune
- for _, r := range rows {
- row := []rune{b.V}
- for i, v := range colw {
- // handle non-ascii character
- lb := len(r[i])
- lr := len([]rune(r[i]))
-
- row = append(row, []rune(" "+r[i]+repeat(v-lb+(lb-lr)/2+1, ' ')+string(b.V))...)
- }
- body = append(body, row)
- }
-
- for _, v := range head {
- table += string(v) + "\n"
- }
- for _, v := range body {
- table += string(v) + "\n"
- }
- table += string(bttm)
- return table
-}
diff --git a/osdsctl/cli/version.go b/osdsctl/cli/version.go
deleted file mode 100644
index e54782423..000000000
--- a/osdsctl/cli/version.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS service.
-
-*/
-
-package cli
-
-import (
- "os"
-
- "github.com/spf13/cobra"
-)
-
-var versionCommand = &cobra.Command{
- Use: "version",
- Short: "manage API versions in the cluster",
- Run: versionAction,
-}
-
-var versionShowCommand = &cobra.Command{
- Use: "show ",
- Short: "show version details by specified API version in the cluster",
- Run: versionShowAction,
-}
-
-var versionListCommand = &cobra.Command{
- Use: "list",
- Short: "list information for all SDSController API versions in the cluster",
- Run: versionListAction,
-}
-
-func init() {
- versionCommand.AddCommand(versionShowCommand)
- versionCommand.AddCommand(versionListCommand)
-}
-
-func versionAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-func versionShowAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- resp, err := client.GetVersion(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Name", "Status", "UpdatedAt"}
- PrintDict(resp, keys, FormatterList{})
-}
-
-func versionListAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
- resp, err := client.ListVersions()
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Name", "Status", "UpdatedAt"}
- PrintList(resp, keys, FormatterList{})
-}
diff --git a/osdsctl/cli/version_test.go b/osdsctl/cli/version_test.go
deleted file mode 100644
index 4e347a6cd..000000000
--- a/osdsctl/cli/version_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "os"
- "os/exec"
- "testing"
-
- c "github.com/opensds/opensds/client"
-)
-
-func init() {
- client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
-}
-
-func TestVersionAction(t *testing.T) {
- beCrasher := os.Getenv("BE_CRASHER")
-
- if beCrasher == "1" {
- var args []string
- versionAction(versionCommand, args)
-
- return
- }
-
- cmd := exec.Command(os.Args[0], "-test.run=TestVersionAction")
- cmd.Env = append(os.Environ(), "BE_CRASHER=1")
- err := cmd.Run()
- e, ok := err.(*exec.ExitError)
-
- if ok && ("exit status 1" == e.Error()) {
- return
- }
-
- t.Fatalf("process ran with %s, want exit status 1", e.Error())
-}
-
-func TestVersionShowAction(t *testing.T) {
- var args []string
- args = append(args, "v1beta")
- versionShowAction(versionShowCommand, args)
-}
-
-func TestVersionListAction(t *testing.T) {
- var args []string
- versionListAction(versionListCommand, args)
-}
diff --git a/osdsctl/cli/volume.go b/osdsctl/cli/volume.go
deleted file mode 100644
index 0f59aae88..000000000
--- a/osdsctl/cli/volume.go
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-/*
-This module implements a entry into the OpenSDS service.
-
-*/
-
-package cli
-
-import (
- "log"
- "os"
- "strconv"
-
- "github.com/opensds/opensds/pkg/model"
- "github.com/spf13/cobra"
-)
-
-var volumeCommand = &cobra.Command{
- Use: "volume",
- Short: "manage volumes in the cluster",
- Run: volumeAction,
-}
-
-var volumeCreateCommand = &cobra.Command{
- Use: "create ",
- Short: "create a volume in the cluster",
- Example: "osdsctl volume create 1 --name vol-name",
- Run: volumeCreateAction,
-}
-
-var volumeShowCommand = &cobra.Command{
- Use: "show ",
- Short: "show a volume in the cluster",
- Run: volumeShowAction,
-}
-
-var volumeListCommand = &cobra.Command{
- Use: "list",
- Short: "list all volumes in the cluster",
- Run: volumeListAction,
-}
-
-var volumeDeleteCommand = &cobra.Command{
- Use: "delete ",
- Short: "delete a volume in the cluster",
- Run: volumeDeleteAction,
-}
-
-var volumeUpdateCommand = &cobra.Command{
- Use: "update ",
- Short: "update a volume in the cluster",
- Run: volumeUpdateAction,
-}
-
-var volumeExtendCommand = &cobra.Command{
- Use: "extend ",
- Short: "extend a volume in the cluster",
- Run: volumeExtendAction,
-}
-
-var (
- profileId string
- volName string
- volDesp string
- volAz string
- volSnap string
-)
-
-var (
- volLimit string
- volOffset string
- volSortDir string
- volSortKey string
- volId string
- volTenantId string
- volUserId string
- volStatus string
- volPoolId string
- volProfileId string
- volGroupId string
- snapshotFromCloud bool
-)
-
-func init() {
- volumeListCommand.Flags().StringVarP(&volLimit, "limit", "", "50", "the number of entries displayed per page")
- volumeListCommand.Flags().StringVarP(&volOffset, "offset", "", "0", "all requested data offsets")
- volumeListCommand.Flags().StringVarP(&volSortDir, "sortDir", "", "desc", "the sort direction of all requested data. supports asc or desc(default)")
- volumeListCommand.Flags().StringVarP(&volSortKey, "sortKey", "", "id",
- "the sort key of all requested data. supports id(default), name, status, availabilityzone, profileid, tenantid, size, poolid, description")
- volumeListCommand.Flags().StringVarP(&volId, "id", "", "", "list volume by id")
- volumeListCommand.Flags().StringVarP(&volName, "name", "", "", "list volume by name")
- volumeListCommand.Flags().StringVarP(&volDesp, "description", "", "", "list volume by description")
- volumeListCommand.Flags().StringVarP(&volTenantId, "tenantId", "", "", "list volume by tenantId")
- volumeListCommand.Flags().StringVarP(&volUserId, "userId", "", "", "list volume by storage userId")
- volumeListCommand.Flags().StringVarP(&volStatus, "status", "", "", "list volume by status")
- volumeListCommand.Flags().StringVarP(&volPoolId, "poolId", "", "", "list volume by poolId")
- volumeListCommand.Flags().StringVarP(&volAz, "availabilityZone", "", "", "list volume by availability zone")
- volumeListCommand.Flags().StringVarP(&volProfileId, "profileId", "", "", "list volume by profile id")
- volumeListCommand.Flags().StringVarP(&volGroupId, "groupId", "", "", "list volume by volume group id")
-
- volumeCommand.PersistentFlags().StringVarP(&profileId, "profile", "p", "", "the id of profile configured by admin")
-
- volumeCommand.AddCommand(volumeCreateCommand)
- volumeCreateCommand.Flags().StringVarP(&volName, "name", "n", "", "the name of created volume")
- volumeCreateCommand.Flags().StringVarP(&volDesp, "description", "d", "", "the description of created volume")
- volumeCreateCommand.Flags().StringVarP(&volAz, "az", "a", "", "the availability zone of created volume")
- volumeCreateCommand.Flags().StringVarP(&volSnap, "snapshot", "s", "", "the snapshot to create volume")
- volumeCreateCommand.Flags().StringVarP(&poolId, "pool", "l", "", "the pool to create volume")
- volumeCreateCommand.Flags().BoolVarP(&snapshotFromCloud, "snapshotFromCloud", "c", false, "download snapshot from cloud")
- volumeCommand.AddCommand(volumeShowCommand)
- volumeCommand.AddCommand(volumeListCommand)
- volumeCommand.AddCommand(volumeDeleteCommand)
- volumeCommand.AddCommand(volumeUpdateCommand)
- volumeUpdateCommand.Flags().StringVarP(&volName, "name", "n", "", "the name of updated volume")
- volumeUpdateCommand.Flags().StringVarP(&volDesp, "description", "d", "", "the description of updated volume")
- volumeCommand.AddCommand(volumeExtendCommand)
-
- volumeCommand.AddCommand(volumeSnapshotCommand)
- volumeCommand.AddCommand(volumeAttachmentCommand)
- volumeCommand.AddCommand(volumeGroupCommand)
- volumeCommand.AddCommand(replicationCommand)
-}
-
-func volumeAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-var volFormatters = FormatterList{"Metadata": JsonFormatter}
-
-func volumeCreateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- size, err := strconv.Atoi(args[0])
- if err != nil {
- Fatalln("input size is not valid. It only support integer.")
- log.Fatalf("error parsing size %s: %+v", args[0], err)
- }
-
- vol := &model.VolumeSpec{
- Name: volName,
- Description: volDesp,
- AvailabilityZone: volAz,
- Size: int64(size),
- ProfileId: profileId,
- PoolId: poolId,
- SnapshotId: volSnap,
- SnapshotFromCloud: snapshotFromCloud,
- }
-
- resp, err := client.CreateVolume(vol)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-
- keys := KeyList{"Id", "CreatedAt", "Name", "Description", "Size", "AvailabilityZone",
- "Status", "PoolId", "ProfileId", "Metadata", "GroupId", "MultiAttach"}
- PrintDict(resp, keys, volFormatters)
-}
-
-func volumeShowAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- resp, err := client.GetVolume(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "CreatedAt", "UpdatedAt", "Name", "Description", "Size",
- "AvailabilityZone", "Status", "PoolId", "ProfileId", "Metadata", "GroupId", "SnapshotId",
- "MultiAttach"}
- PrintDict(resp, keys, volFormatters)
-}
-
-func volumeListAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
-
- var opts = map[string]string{"limit": volLimit, "offset": volOffset, "sortDir": volSortDir,
- "sortKey": volSortKey, "Id": volId,
- "Name": volName, "Description": volDesp, "UserId": volUserId, "AvailabilityZone": volAz,
- "Status": volStatus, "PoolId": volPoolId, "ProfileId": volProfileId, "GroupId": volGroupId}
-
- resp, err := client.ListVolumes(opts)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "Name", "Description", "Size", "Status", "ProfileId", "AvailabilityZone"}
- PrintList(resp, keys, volFormatters)
-}
-
-func volumeDeleteAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- vol := &model.VolumeSpec{
- ProfileId: profileId,
- }
- err := client.DeleteVolume(args[0], vol)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-}
-
-func volumeUpdateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- vol := &model.VolumeSpec{
- Name: volName,
- Description: volDesp,
- }
-
- resp, err := client.UpdateVolume(args[0], vol)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "UpdatedAt", "Name", "Description", "Size", "AvailabilityZone",
- "Status", "PoolId", "ProfileId", "Metadata", "GroupId", "MultiAttach"}
- PrintDict(resp, keys, volFormatters)
-}
-
-func volumeExtendAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 2)
- newSize, err := strconv.Atoi(args[1])
- if err != nil {
- log.Fatalf("error parsing new size %s: %+v", args[1], err)
- }
-
- body := &model.ExtendVolumeSpec{
- NewSize: int64(newSize),
- }
-
- resp, err := client.ExtendVolume(args[0], body)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "CreatedAt", "UpdatedAt", "Name", "Description", "Size",
- "AvailabilityZone", "Status", "PoolId", "ProfileId", "Metadata", "GroupId", "MultiAttach"}
- PrintDict(resp, keys, volFormatters)
-}
diff --git a/osdsctl/cli/volume_test.go b/osdsctl/cli/volume_test.go
deleted file mode 100644
index 2c2c137c6..000000000
--- a/osdsctl/cli/volume_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "os"
- "os/exec"
- "testing"
-
- c "github.com/opensds/opensds/client"
-)
-
-func init() {
- client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
-}
-
-func TestVolumeAction(t *testing.T) {
- beCrasher := os.Getenv("BE_CRASHER")
-
- if beCrasher == "1" {
- var args []string
- volumeAction(volumeCommand, args)
-
- return
- }
-
- cmd := exec.Command(os.Args[0], "-test.run=TestVolumeAction")
- cmd.Env = append(os.Environ(), "BE_CRASHER=1")
- err := cmd.Run()
- e, ok := err.(*exec.ExitError)
-
- if ok && ("exit status 1" == e.Error()) {
- return
- }
-
- t.Fatalf("process ran with %s, want exit status 1", e.Error())
-}
-
-func TestVolumeCreateAction(t *testing.T) {
- var args []string
- args = append(args, "1")
- volumeCreateAction(volumeCreateCommand, args)
-}
-
-func TestVolumeShowAction(t *testing.T) {
- var args []string
- args = append(args, "bd5b12a8-a101-11e7-941e-d77981b584d8")
- volumeShowAction(volumeShowCommand, args)
-}
-
-func TestVolumeListAction(t *testing.T) {
- var args []string
- volumeListAction(volumeListCommand, args)
-}
-
-func TestVolumeDeleteAction(t *testing.T) {
- var args []string
- args = append(args, "bd5b12a8-a101-11e7-941e-d77981b584d8")
- volumeDeleteAction(volumeDeleteCommand, args)
-}
-
-func TestVolumeUpdateAction(t *testing.T) {
- var args []string
- args = append(args, "bd5b12a8-a101-11e7-941e-d77981b584d8")
- volumeUpdateAction(volumeDeleteCommand, args)
-}
-func TestVolumeExtendAction(t *testing.T) {
- var args []string
- args = append(args, "bd5b12a8-a101-11e7-941e-d77981b584d8")
- args = append(args, "5")
- volumeExtendAction(volumeExtendCommand, args)
-}
diff --git a/osdsctl/cli/volumeattachment.go b/osdsctl/cli/volumeattachment.go
deleted file mode 100644
index 7279651d8..000000000
--- a/osdsctl/cli/volumeattachment.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS service.
-
-*/
-
-package cli
-
-import (
- "encoding/json"
- "os"
-
- "github.com/opensds/opensds/pkg/model"
- "github.com/spf13/cobra"
-)
-
-var volumeAttachmentCommand = &cobra.Command{
- Use: "attachment",
- Short: "manage volume attachments in the cluster",
- Run: volumeAttachmentAction,
-}
-
-var volumeAttachmentCreateCommand = &cobra.Command{
- Use: "create ",
- Short: "Attach a volume to a host in the cluster",
- Run: volumeAttachmentCreateAction,
-}
-
-var volumeAttachmentShowCommand = &cobra.Command{
- Use: "show ",
- Short: "show a volume attachment in the cluster",
- Run: volumeAttachmentShowAction,
-}
-
-var volumeAttachmentListCommand = &cobra.Command{
- Use: "list",
- Short: "list all volume attachments in the cluster",
- Run: volumeAttachmentListAction,
-}
-
-var volumeAttachmentDeleteCommand = &cobra.Command{
- Use: "delete ",
- Short: "detach a volume from a host in the cluster",
- Run: volumeAttachmentDeleteAction,
-}
-
-var volumeAttachmentUpdateCommand = &cobra.Command{
- Use: "update ",
- Short: "update a volume attachment in the cluster",
- Run: volumeAttachmentUpdateAction,
-}
-
-var (
- volAtmLimit string
- volAtmOffset string
- volAtmSortDir string
- volAtmSortKey string
- volAtmId string
- volAtmUserId string
- volAtmVolumeId string
- volAtmMountpoint string
- volAtmStatus string
-)
-
-func init() {
- volumeAttachmentListCommand.Flags().StringVarP(&volAtmLimit, "limit", "", "50", "the number of ertries displayed per page")
- volumeAttachmentListCommand.Flags().StringVarP(&volAtmOffset, "offset", "", "0", "all requested data offsets")
- volumeAttachmentListCommand.Flags().StringVarP(&volAtmSortDir, "sortDir", "", "desc", "the sort direction of all requested data. supports asc or desc(default)")
- volumeAttachmentListCommand.Flags().StringVarP(&volAtmSortKey, "sortKey", "", "id",
- "the sort key of all requested data. supports id(default), volumeid, status, userid, tenantid")
- volumeAttachmentListCommand.Flags().StringVarP(&volAtmId, "id", "", "", "list volume attachment by id")
- volumeAttachmentListCommand.Flags().StringVarP(&volAtmUserId, "userId", "", "", "list volume attachment by storage userId")
- volumeAttachmentListCommand.Flags().StringVarP(&volAtmVolumeId, "volumeId", "", "", "list volume attachment by volumeId")
- volumeAttachmentListCommand.Flags().StringVarP(&volAtmStatus, "status", "", "", "list volume attachment by status")
- volumeAttachmentListCommand.Flags().StringVarP(&volAtmMountpoint, "mountpoint", "", "", "list volume attachment by mountpoint")
-
- volumeAttachmentCommand.AddCommand(volumeAttachmentCreateCommand)
- volumeAttachmentCommand.AddCommand(volumeAttachmentShowCommand)
- volumeAttachmentCommand.AddCommand(volumeAttachmentListCommand)
- volumeAttachmentCommand.AddCommand(volumeAttachmentDeleteCommand)
- volumeAttachmentCommand.AddCommand(volumeAttachmentUpdateCommand)
-}
-
-func volumeAttachmentAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-var attachmentFormatters = FormatterList{"ConnectionInfo": JsonFormatter}
-
-func volumeAttachmentCreateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 2)
- attachment := &model.VolumeAttachmentSpec{
- VolumeId: args[0],
- HostId: args[1],
- }
- resp, err := client.CreateVolumeAttachment(attachment)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "HostId", "VolumeId", "Status", "Mountpoint",
- "AttachMode", "ConnectionInfo", "CreatedAt"}
- PrintDict(resp, keys, attachmentFormatters)
-}
-
-func volumeAttachmentShowAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- resp, err := client.GetVolumeAttachment(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "TenantId", "UserId", "HostId", "VolumeId", "Status",
- "Mountpoint", "AccessProtocol", "AttachMode", "ConnectionInfo", "CreatedAt", "UpdatedAt"}
- PrintDict(resp, keys, attachmentFormatters)
-}
-
-func volumeAttachmentListAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
-
- var opts = map[string]string{"limit": volAtmLimit, "offset": volAtmOffset,
- "sortDir": volAtmSortDir, "sortKey": volAtmSortKey, "Id": volAtmId,
- "UserId": volAtmUserId, "VolumeId": volAtmVolumeId,
- "Status": volAtmStatus, "Mountpoint": volAtmMountpoint}
-
- resp, err := client.ListVolumeAttachments(opts)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "HostId", "VolumeId", "Status", "AccessProtocol"}
- PrintList(resp, keys, attachmentFormatters)
-}
-
-func volumeAttachmentDeleteAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- attachment := &model.VolumeAttachmentSpec{}
- err := client.DeleteVolumeAttachment(args[0], attachment)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-}
-
-func volumeAttachmentUpdateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 2)
- attachment := &model.VolumeAttachmentSpec{}
- if err := json.Unmarshal([]byte(args[1]), attachment); err != nil {
- Errorln(err)
- cmd.Usage()
- os.Exit(1)
- }
-
- resp, err := client.UpdateVolumeAttachment(args[0], attachment)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-
- keys := KeyList{"Id", "HostId", "VolumeId", "Status", "Mountpoint",
- "AttachMode", "ConnectionInfo", "UpdatedAt"}
- PrintDict(resp, keys, attachmentFormatters)
-}
diff --git a/osdsctl/cli/volumeattachment_test.go b/osdsctl/cli/volumeattachment_test.go
deleted file mode 100644
index 6fee81427..000000000
--- a/osdsctl/cli/volumeattachment_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "os"
- "os/exec"
- "testing"
-
- c "github.com/opensds/opensds/client"
- . "github.com/opensds/opensds/testutils/collection"
-)
-
-func init() {
- client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
-}
-
-func TestVolumeAttachmentAction(t *testing.T) {
- beCrasher := os.Getenv("BE_CRASHER")
-
- if beCrasher == "1" {
- var args []string
- volumeAttachmentAction(volumeAttachmentCommand, args)
-
- return
- }
-
- cmd := exec.Command(os.Args[0], "-test.run=TestVolumeAttachmentAction")
- cmd.Env = append(os.Environ(), "BE_CRASHER=1")
- err := cmd.Run()
- e, ok := err.(*exec.ExitError)
-
- if ok && ("exit status 1" == e.Error()) {
- return
- }
-
- t.Fatalf("process ran with %s, want exit status 1", e.Error())
-}
-
-func TestVolumeAttachmentCreateAction(t *testing.T) {
- var args []string
- args = append(args, "f2dda3d2-bf79-11e7-8665-f750b088f63e", "202964b5-8e73-46fd-b41b-a8e403f3c30b")
- volumeAttachmentCreateAction(volumeAttachmentCreateCommand, args)
-}
-
-func TestVolumeAttachmentShowAction(t *testing.T) {
- var args []string
- args = append(args, "f2dda3d2-bf79-11e7-8665-f750b088f63e")
- volumeAttachmentShowAction(volumeAttachmentShowCommand, args)
-}
-
-func TestVolumeAttachmentListAction(t *testing.T) {
- var args []string
- volumeAttachmentListAction(volumeAttachmentListCommand, args)
-}
-
-func TestVolumeAttachmentDeleteAction(t *testing.T) {
- var args []string
- args = append(args, "f2dda3d2-bf79-11e7-8665-f750b088f63e")
- volumeAttachmentDeleteAction(volumeAttachmentDeleteCommand, args)
-}
-
-func TestVolumeAttachmentUpdateAction(t *testing.T) {
- var args []string
- args = append(args, "f2dda3d2-bf79-11e7-8665-f750b088f63e")
- args = append(args, ByteAttachment)
- volumeAttachmentUpdateAction(volumeAttachmentDeleteCommand, args)
-}
diff --git a/osdsctl/cli/volumegroup.go b/osdsctl/cli/volumegroup.go
deleted file mode 100644
index 96d0debbd..000000000
--- a/osdsctl/cli/volumegroup.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-/*
-This module implements a entry into the OpenSDS service.
-
-*/
-
-package cli
-
-import (
- "fmt"
- "os"
-
- "github.com/opensds/opensds/pkg/model"
- "github.com/spf13/cobra"
-)
-
-var volumeGroupCommand = &cobra.Command{
- Use: "group",
- Short: "manage volume group in the cluster",
- Run: volumeGroupAction,
-}
-
-var volumeGroupCreateCommand = &cobra.Command{
- Use: "create",
- Short: "create a volume group in the cluster",
- Run: volumeGroupCreateAction,
-}
-
-var volumeGroupShowCommand = &cobra.Command{
- Use: "show ",
- Short: "show a volume group in the cluster",
- Run: volumeGroupShowAction,
-}
-
-var volumeGroupListCommand = &cobra.Command{
- Use: "list",
- Short: "list all volume groups in the cluster",
- Run: volumeGroupListAction,
-}
-
-var volumeGroupDeleteCommand = &cobra.Command{
- Use: "delete ",
- Short: "delete a volume group in the cluster",
- Run: volumeGroupDeleteAction,
-}
-
-var volumeGroupUpdateCommand = &cobra.Command{
- Use: "update ",
- Short: "update a volume group in the cluster",
- Run: volumeGroupUpdateAction,
-}
-
-var (
- vgLimit string
- vgOffset string
- vgSortDir string
- vgSortKey string
- vgId string
- vgTenantId string
- vgUserId string
-
- vgName string
- vgDesp string
- vgAZ string
- addVolumes *[]string
- removeVolumes *[]string
- vgprofiles *[]string
- vgStatus string
- vgPoolId string
-)
-
-func init() {
- volumeGroupListCommand.Flags().StringVarP(&vgLimit, "limit", "", "50", "the number of ertries displayed per page")
- volumeGroupListCommand.Flags().StringVarP(&vgOffset, "offset", "", "0", "all requested data offsets")
- volumeGroupListCommand.Flags().StringVarP(&vgSortDir, "sortDir", "", "desc", "the sort direction of all requested data. supports asc or desc(default)")
- volumeGroupListCommand.Flags().StringVarP(&vgSortKey, "sortKey", "", "id",
- "the sort key of all requested data. supports id(default), name, status, availability zone, tenantid, pool id")
- volumeGroupListCommand.Flags().StringVarP(&vgId, "id", "", "", "list volume group by id")
- volumeGroupListCommand.Flags().StringVarP(&vgTenantId, "tenantId", "", "", "list volume group by tenantId")
- volumeGroupListCommand.Flags().StringVarP(&vgUserId, "userId", "", "", "list volume group by storage userId")
- volumeGroupListCommand.Flags().StringVarP(&vgStatus, "status", "", "", "list volume group by status")
- volumeGroupListCommand.Flags().StringVarP(&vgName, "name", "", "", "list volume group by Name")
- volumeGroupListCommand.Flags().StringVarP(&vgDesp, "description", "", "", "list volume group by description")
- volumeGroupListCommand.Flags().StringVarP(&vgAZ, "availabilityZone", "", "", "list volume group by availability zone")
- volumeGroupListCommand.Flags().StringVarP(&vgPoolId, "poolId", "", "", "list volume group by pool id")
-
- volumeGroupCommand.AddCommand(volumeGroupCreateCommand)
- volumeGroupCreateCommand.Flags().StringVarP(&vgName, "name", "n", "", "the name of created volume group")
- volumeGroupCreateCommand.Flags().StringVarP(&vgDesp, "description", "d", "", "the description of created volume group")
- volumeGroupCreateCommand.Flags().StringVarP(&vgAZ, "availabilityZone", "a", "", "the availabilityZone of created volume group")
- vgprofiles = volumeGroupCreateCommand.Flags().StringSliceP("profiles", "", nil, "the profiles of created volume group")
- volumeGroupCommand.AddCommand(volumeGroupShowCommand)
- volumeGroupCommand.AddCommand(volumeGroupListCommand)
- volumeGroupCommand.AddCommand(volumeGroupDeleteCommand)
- volumeGroupCommand.AddCommand(volumeGroupUpdateCommand)
- volumeGroupUpdateCommand.Flags().StringVarP(&vgName, "name", "n", "", "the name of updated volume group")
- volumeGroupUpdateCommand.Flags().StringVarP(&vgDesp, "description", "d", "", "the description of updated volume group")
- addVolumes = volumeGroupUpdateCommand.Flags().StringSliceP("addVolumes", "a", nil, "the addVolumes of updated volume group")
- removeVolumes = volumeGroupUpdateCommand.Flags().StringSliceP("removeVolumes", "r", nil, "the removeVolumes of updated volume group")
-}
-
-func volumeGroupAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-func volumeGroupCreateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
- vg := &model.VolumeGroupSpec{
- Name: vgName,
- Description: vgDesp,
- AvailabilityZone: vgAZ,
- Profiles: *vgprofiles,
- }
-
- resp, err := client.CreateVolumeGroup(vg)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "CreatedAt", "Name", "Description", "Status", "AvailabilityZone",
- "PoolId", "Profiles"}
- PrintDict(resp, keys, FormatterList{})
-}
-
-func volumeGroupShowAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- resp, err := client.GetVolumeGroup(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "CreatedAt", "UpdatedAt", "Name", "Description", "Status", "AvailabilityZone",
- "PoolId", "Profiles"}
- PrintDict(resp, keys, FormatterList{})
-}
-
-func volumeGroupListAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
-
- var opts = map[string]string{"limit": vgLimit, "offset": vgOffset, "sortDir": vgSortDir,
- "sortKey": vgSortKey, "Id": vgId,
- "Name": vgName, "Description": vgDesp, "UserId": vgUserId, "AvailabilityZone": vgAZ,
- "Status": vgStatus, "PoolId": vgPoolId}
-
- resp, err := client.ListVolumeGroups(opts)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "Name", "Description", "Status", "Profiles"}
- PrintList(resp, keys, FormatterList{})
-}
-
-func volumeGroupDeleteAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- err := client.DeleteVolumeGroup(args[0], nil)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- fmt.Printf("Delete group(%s) success.\n", args[0])
-}
-
-func volumeGroupUpdateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- snp := &model.VolumeGroupSpec{
- Name: vgName,
- Description: vgDesp,
- AddVolumes: *addVolumes,
- RemoveVolumes: *removeVolumes,
- }
-
- resp, err := client.UpdateVolumeGroup(args[0], snp)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "UpdatedAt", "Name", "Description", "Status", "AvailabilityZone",
- "PoolId", "Profiles"}
- PrintDict(resp, keys, FormatterList{})
-}
diff --git a/osdsctl/cli/volumegroup_test.go b/osdsctl/cli/volumegroup_test.go
deleted file mode 100644
index cd59dd623..000000000
--- a/osdsctl/cli/volumegroup_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "os"
- "os/exec"
- "testing"
-
- c "github.com/opensds/opensds/client"
-)
-
-func init() {
- client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
-}
-
-func TestVolumeGroupAction(t *testing.T) {
- beCrasher := os.Getenv("BE_CRASHER")
-
- if beCrasher == "1" {
- var args []string
- volumeGroupAction(volumeGroupCommand, args)
-
- return
- }
-
- cmd := exec.Command(os.Args[0], "-test.run=TestVolumeGroupAction")
- cmd.Env = append(os.Environ(), "BE_CRASHER=1")
- err := cmd.Run()
- e, ok := err.(*exec.ExitError)
-
- if ok && ("exit status 1" == e.Error()) {
- return
- }
-
- t.Fatalf("process ran with %s, want exit status 1", e.Error())
-}
-
-func TestVolumeGroupCreateAction(t *testing.T) {
- var args []string
- volumeGroupCreateAction(volumeGroupCreateCommand, args)
-}
-
-func TestVolumeGroupShowAction(t *testing.T) {
- var args []string
- args = append(args, "3769855c-a102-11e7-b772-17b880d2f537")
- volumeGroupShowAction(volumeGroupShowCommand, args)
-}
-
-func TestVolumeGroupListAction(t *testing.T) {
- var args []string
- volumeGroupListAction(volumeGroupListCommand, args)
-}
-
-func TestVolumeGroupDeleteAction(t *testing.T) {
- var args []string
- args = append(args, "3769855c-a102-11e7-b772-17b880d2f537")
- volumeGroupDeleteAction(volumeGroupDeleteCommand, args)
-}
-
-func TestVolumeGroupUpdateAction(t *testing.T) {
- var args []string
- args = append(args, "3769855c-a102-11e7-b772-17b880d2f537")
- volumeGroupUpdateAction(volumeGroupDeleteCommand, args)
-}
diff --git a/osdsctl/cli/volumesnapshot.go b/osdsctl/cli/volumesnapshot.go
deleted file mode 100644
index b0bdc9ca3..000000000
--- a/osdsctl/cli/volumesnapshot.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS service.
-
-*/
-
-package cli
-
-import (
- "os"
-
- "github.com/opensds/opensds/pkg/model"
- "github.com/spf13/cobra"
-)
-
-var volumeSnapshotCommand = &cobra.Command{
- Use: "snapshot",
- Short: "manage volume snapshots in the cluster",
- Run: volumeSnapshotAction,
-}
-
-var volumeSnapshotCreateCommand = &cobra.Command{
- Use: "create ",
- Short: "create a snapshot of specified volume in the cluster",
- Run: volumeSnapshotCreateAction,
-}
-
-var volumeSnapshotShowCommand = &cobra.Command{
- Use: "show ",
- Short: "show a volume snapshot in the cluster",
- Run: volumeSnapshotShowAction,
-}
-
-var volumeSnapshotListCommand = &cobra.Command{
- Use: "list",
- Short: "list all volume snapshots in the cluster",
- Run: volumeSnapshotListAction,
-}
-
-var volumeSnapshotDeleteCommand = &cobra.Command{
- Use: "delete ",
- Short: "delete a volume snapshot of specified volume in the cluster",
- Run: volumeSnapshotDeleteAction,
-}
-
-var volumeSnapshotUpdateCommand = &cobra.Command{
- Use: "update ",
- Short: "update a volume snapshot in the cluster",
- Run: volumeSnapshotUpdateAction,
-}
-
-var (
- volSnapshotName string
- volSnapshotDesp string
-)
-
-var (
- volSnapLimit string
- volSnapOffset string
- volSnapSortDir string
- volSnapSortKey string
- volSnapId string
- volSnapUserId string
- volSnapName string
- volSnapDescription string
- volSnapStatus string
- volSnapVolumeId string
-)
-
-func init() {
- volumeSnapshotListCommand.Flags().StringVarP(&volSnapLimit, "limit", "", "50", "the number of ertries displayed per page")
- volumeSnapshotListCommand.Flags().StringVarP(&volSnapOffset, "offset", "", "0", "all requested data offsets")
- volumeSnapshotListCommand.Flags().StringVarP(&volSnapSortDir, "sortDir", "", "desc", "the sort direction of all requested data. supports asc or desc(default)")
- volumeSnapshotListCommand.Flags().StringVarP(&volSnapSortKey, "sortKey", "", "id",
- "the sort key of all requested data. supports id(default), volumeid, status, userid, tenantid, size")
- volumeSnapshotListCommand.Flags().StringVarP(&volSnapId, "id", "", "", "list volume snapshot by id")
- volumeSnapshotListCommand.Flags().StringVarP(&volSnapUserId, "userId", "", "", "list volume snapshot by storage userId")
- volumeSnapshotListCommand.Flags().StringVarP(&volSnapVolumeId, "volumeId", "", "", "list volume snapshot by volume id")
- volumeSnapshotListCommand.Flags().StringVarP(&volSnapStatus, "status", "", "", "list volume snapshot by status")
- volumeSnapshotListCommand.Flags().StringVarP(&volSnapName, "name", "", "", "list volume snapshot by Name")
- volumeSnapshotListCommand.Flags().StringVarP(&volSnapDescription, "description", "", "", "list volume snapshot by description")
-
- volumeSnapshotCommand.AddCommand(volumeSnapshotCreateCommand)
- volumeSnapshotCreateCommand.Flags().StringVarP(&volSnapshotName, "name", "n", "", "the name of created volume snapshot")
- volumeSnapshotCreateCommand.Flags().StringVarP(&volSnapshotDesp, "description", "d", "", "the description of created volume snapshot")
- volumeSnapshotCommand.AddCommand(volumeSnapshotShowCommand)
- volumeSnapshotCommand.AddCommand(volumeSnapshotListCommand)
- volumeSnapshotCommand.AddCommand(volumeSnapshotDeleteCommand)
- volumeSnapshotCommand.AddCommand(volumeSnapshotUpdateCommand)
- volumeSnapshotUpdateCommand.Flags().StringVarP(&volSnapshotName, "name", "n", "", "the name of updated volume snapshot")
- volumeSnapshotUpdateCommand.Flags().StringVarP(&volSnapshotDesp, "description", "d", "", "the description of updated volume snapshot")
-}
-
-func volumeSnapshotAction(cmd *cobra.Command, args []string) {
- cmd.Usage()
- os.Exit(1)
-}
-
-var volSnapshotFormatters = FormatterList{"Metadata": JsonFormatter}
-
-func volumeSnapshotCreateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- snp := &model.VolumeSnapshotSpec{
- Name: volSnapshotName,
- Description: volSnapshotDesp,
- VolumeId: args[0],
- ProfileId: profileId,
- }
-
- resp, err := client.CreateVolumeSnapshot(snp)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "CreatedAt", "Name", "Description", "Size", "Status",
- "ProfileId", "VolumeId", "Metadata"}
- PrintDict(resp, keys, volSnapshotFormatters)
-}
-
-func volumeSnapshotShowAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- resp, err := client.GetVolumeSnapshot(args[0])
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "CreatedAt", "UpdatedAt", "Name", "Description", "Size", "Status",
- "ProfileId", "VolumeId", "Metadata"}
- PrintDict(resp, keys, volSnapshotFormatters)
-}
-
-func volumeSnapshotListAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 0)
-
- var opts = map[string]string{"limit": volSnapLimit, "offset": volSnapOffset, "sortDir": volSnapSortDir,
- "sortKey": volSnapSortKey, "Id": volSnapId,
- "Name": volSnapName, "Description": volSnapDescription, "UserId": volSnapUserId,
- "Status": volSnapStatus, "VolumeId": volSnapVolumeId}
-
- resp, err := client.ListVolumeSnapshots(opts)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "Name", "Description", "Size", "Status", "ProfileId", "VolumeId"}
- PrintList(resp, keys, volSnapshotFormatters)
-}
-
-func volumeSnapshotDeleteAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- snapID := args[0]
- err := client.DeleteVolumeSnapshot(snapID, nil)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
-}
-
-func volumeSnapshotUpdateAction(cmd *cobra.Command, args []string) {
- ArgsNumCheck(cmd, args, 1)
- snp := &model.VolumeSnapshotSpec{
- Name: volSnapshotName,
- Description: volSnapshotDesp,
- }
-
- resp, err := client.UpdateVolumeSnapshot(args[0], snp)
- if err != nil {
- Fatalln(HttpErrStrip(err))
- }
- keys := KeyList{"Id", "UpdatedAt", "Name", "Description", "Size", "Status",
- "ProfileId", "VolumeId", "Metadata"}
- PrintDict(resp, keys, volSnapshotFormatters)
-}
diff --git a/osdsctl/cli/volumesnapshot_test.go b/osdsctl/cli/volumesnapshot_test.go
deleted file mode 100644
index 2f105c2cd..000000000
--- a/osdsctl/cli/volumesnapshot_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cli
-
-import (
- "os"
- "os/exec"
- "testing"
-
- c "github.com/opensds/opensds/client"
-)
-
-func init() {
- client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
-}
-
-func TestVolumeSnapshotAction(t *testing.T) {
- beCrasher := os.Getenv("BE_CRASHER")
-
- if beCrasher == "1" {
- var args []string
- volumeSnapshotAction(volumeSnapshotCommand, args)
-
- return
- }
-
- cmd := exec.Command(os.Args[0], "-test.run=TestVolumeSnapshotAction")
- cmd.Env = append(os.Environ(), "BE_CRASHER=1")
- err := cmd.Run()
- e, ok := err.(*exec.ExitError)
-
- if ok && ("exit status 1" == e.Error()) {
- return
- }
-
- t.Fatalf("process ran with %s, want exit status 1", e.Error())
-}
-
-func TestVolumeSnapshotCreateAction(t *testing.T) {
- var args []string
- args = append(args, "bd5b12a8-a101-11e7-941e-d77981b584d8")
- volumeSnapshotCreateAction(volumeSnapshotCreateCommand, args)
-}
-
-func TestVolumeSnapshotShowAction(t *testing.T) {
- var args []string
- args = append(args, "3769855c-a102-11e7-b772-17b880d2f537")
- volumeSnapshotShowAction(volumeSnapshotShowCommand, args)
-}
-
-func TestVolumeSnapshotListAction(t *testing.T) {
- var args []string
- volumeSnapshotListAction(volumeSnapshotListCommand, args)
-}
-
-func TestVolumeSnapshotDeleteAction(t *testing.T) {
- var args []string
- args = append(args, "3769855c-a102-11e7-b772-17b880d2f537")
- volumeSnapshotDeleteAction(volumeSnapshotDeleteCommand, args)
-}
-
-func TestVolumeSnapshotUpdateAction(t *testing.T) {
- var args []string
- args = append(args, "3769855c-a102-11e7-b772-17b880d2f537")
- volumeSnapshotUpdateAction(volumeSnapshotDeleteCommand, args)
-}
diff --git a/osdsctl/completion/osdsctl.bash_completion b/osdsctl/completion/osdsctl.bash_completion
deleted file mode 100644
index 3c5a819ab..000000000
--- a/osdsctl/completion/osdsctl.bash_completion
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2017 The OpenSDS Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-_osdsctl() {
- COMPREPLY=()
- local cur=${COMP_WORDS[COMP_CWORD]};
- local pre=${COMP_WORDS[COMP_CWORD-1]};
- case "$COMP_CWORD $pre" in
- "1 osdsctl")
- COMPREPLY=($(compgen -W 'dock pool profile version volume fileshare host' -- $cur)) ;;
- "2 dock")
- COMPREPLY=($(compgen -W 'list show' -- $cur)) ;;
- "2 pool")
- COMPREPLY=($(compgen -W 'list show' -- $cur)) ;;
- "2 profile")
- COMPREPLY=($(compgen -W 'create delete list show' -- $cur)) ;;
- "2 version")
- COMPREPLY=($(compgen -W 'list show' -- $cur)) ;;
- "2 volume")
- COMPREPLY=($(compgen -W 'attachment create delete list show extend snapshot update group replication' -- $cur)) ;;
- "2 fileshare")
- COMPREPLY=($(compgen -W 'create delete list show acl snapshot' -- $cur)) ;;
- "2 host")
- COMPREPLY=($(compgen -W 'create delete list show update initiator' -- $cur)) ;;
- '*')
- ;;
- esac
-
- # differentiate volume and fileshare
- if [[ $COMP_CWORD == 3 ]]; then
- local ppre=${COMP_WORDS[COMP_CWORD-2]};
- case "$ppre $pre" in
- "volume replication")
- COMPREPLY=($(compgen -W 'create delete list show update enable disable failover' -- $cur)) ;;
- "volume snapshot" | "fileshare snapshot")
- COMPREPLY=($(compgen -W 'create delete list show update' -- $cur)) ;;
- "volume attachment")
- COMPREPLY=($(compgen -W 'create delete list show update' -- $cur)) ;;
- "volume group")
- COMPREPLY=($(compgen -W 'create delete list show update' -- $cur)) ;;
- "fileshare acl")
- COMPREPLY=($(compgen -W 'create delete list show' -- $cur)) ;;
- "host initiator")
- COMPREPLY=($(compgen -W 'add remove' -- $cur)) ;;
- esac
- fi
- return 0
-}
-complete -o bashdefault -o default -F _osdsctl osdsctl
diff --git a/osdsctl/main.go b/osdsctl/main.go
deleted file mode 100755
index 828951be4..000000000
--- a/osdsctl/main.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS CLI service.
-
-*/
-
-package main
-
-import (
- "log"
- "os"
-
- "github.com/opensds/opensds/osdsctl/cli"
-)
-
-func main() {
- // Assign it to the standard logger
- log.SetFlags(log.LstdFlags | log.Lshortfile)
-
- if err := cli.Run(); err != nil {
- cli.Errorln(err)
- os.Exit(1)
- }
-}
diff --git a/pkg/api/controllers/alert.go b/pkg/api/controllers/alert.go
deleted file mode 100755
index 7b8eb5174..000000000
--- a/pkg/api/controllers/alert.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS northbound service to send Alerts to Alert manager
-
-*/
-
-package controllers
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "strings"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/controller/client"
- "github.com/opensds/opensds/pkg/model"
-)
-
-func NewAlertPortal() *AlertPortal {
- return &AlertPortal{
- CtrClient: client.NewClient(),
- }
-}
-
-type AlertPortal struct {
- BasePortal
-
- CtrClient client.Client
-}
-
-func (v *AlertPortal) CreateAlert() {
-
- var postableAlert = model.PostableAlertSpec{}
-
- // Unmarshal the request body
- if err := json.NewDecoder(v.Ctx.Request.Body).Decode(&postableAlert); err != nil {
- errMsg := fmt.Sprintf("parse alert request body failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- alertArr := make([]*model.PostableAlertSpec, 0)
- alertArr = append(alertArr, &postableAlert)
-
- b, e := json.Marshal(alertArr)
- if e != nil {
- log.Error(e)
- v.ErrorHandle(model.ErrorInternalServer, e.Error())
- return
- }
-
- body := strings.NewReader(string(b[:]))
-
- // Alert manager will be co-located on the server, default port is 9093 for the POST API endpoint
- // Raised issue https://github.com/opensds/opensds/issues/691 to make this configurable
- req, err := http.NewRequest("POST", "http://localhost:9093/api/v1/alerts", body)
- if err != nil {
- // handle err
- v.ErrorHandle(model.ErrorInternalServer, e.Error())
- return
- }
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- // handle err
- v.ErrorHandle(model.ErrorInternalServer, e.Error())
- return
- }
- defer resp.Body.Close()
-
- // Marshal the result.
- resBody, _ := json.Marshal(resp)
- v.SuccessHandle(StatusAccepted, resBody)
-
- return
-}
diff --git a/pkg/api/controllers/attachment.go b/pkg/api/controllers/attachment.go
deleted file mode 100644
index e0b8bace4..000000000
--- a/pkg/api/controllers/attachment.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS northbound service.
-
-*/
-
-package controllers
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/contrib/drivers/utils/config"
- "github.com/opensds/opensds/pkg/api/policy"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/controller/client"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils"
- apiconfig "github.com/opensds/opensds/pkg/utils/config"
-)
-
-func NewVolumeAttachmentPortal() *VolumeAttachmentPortal {
- return &VolumeAttachmentPortal{
- CtrClient: client.NewClient(),
- }
-}
-
-type VolumeAttachmentPortal struct {
- BasePortal
-
- CtrClient client.Client
-}
-
-func (v *VolumeAttachmentPortal) CreateVolumeAttachment() {
- if !policy.Authorize(v.Ctx, "volume:create_attachment") {
- return
- }
- ctx := c.GetContext(v.Ctx)
- var attachment = model.VolumeAttachmentSpec{
- BaseModel: &model.BaseModel{},
- }
-
- if err := json.NewDecoder(v.Ctx.Request.Body).Decode(&attachment); err != nil {
- errMsg := fmt.Sprintf("parse volume attachment request body failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Check if host exists
- host, err := db.C.GetHost(ctx, attachment.HostId)
- if err != nil {
- errMsg := fmt.Sprintf("get host failed in create volume attachment method: %v", err)
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Check if volume exists and volume status is normal
- vol, err := db.C.GetVolume(ctx, attachment.VolumeId)
- if err != nil {
- errMsg := fmt.Sprintf("get volume failed in create volume attachment method: %v", err)
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- if !utils.Contains(host.AvailabilityZones, vol.AvailabilityZone) {
- errMsg := fmt.Sprintf("availability zone of volume: %s is not in the host availability zones: %v",
- vol.AvailabilityZone,
- host.AvailabilityZones)
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- if vol.Status == model.VolumeAvailable {
- db.UpdateVolumeStatus(ctx, db.C, vol.Id, model.VolumeAttaching)
- } else if vol.Status == model.VolumeInUse {
- if vol.MultiAttach {
- db.UpdateVolumeStatus(ctx, db.C, vol.Id, model.VolumeAttaching)
- } else {
- errMsg := "volume is already attached to one of the host. If you want to attach to multiple host, volume multiattach must be true"
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- } else {
- errMsg := "status of volume is available. It can be attached to host"
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Set AccessProtocol
- pol, err := db.C.GetPool(ctx, vol.PoolId)
- if err != nil {
- msg := fmt.Sprintf("get pool failed in create volume attachment method: %v", err)
- log.Error(msg)
- return
- }
- var protocol = pol.Extras.IOConnectivity.AccessProtocol
- if protocol == "" {
- // Default protocol is iscsi
- protocol = config.ISCSIProtocol
- }
- attachment.AccessProtocol = protocol
-
- // Set AttachMode, rw is a default setting
- if attachment.AttachMode != "ro" && attachment.AttachMode != "rw" {
- attachment.AttachMode = "rw"
- }
- attachment.Status = model.VolumeAttachCreating
-
- // NOTE:It will create a volume attachment entry into the database and initialize its status
- // as "creating". It will not wait for the real volume attachment creation to complete
- // and will return result immediately.
- result, err := db.C.CreateVolumeAttachment(ctx, &attachment)
- if err != nil {
- errMsg := fmt.Sprintf("create volume attachment failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- v.SuccessHandle(StatusAccepted, body)
-
- // NOTE:The real volume attachment creation process.
- // Volume attachment creation request is sent to the Dock. Dock will update volume attachment status to "available"
- // after volume attachment creation is completed.
- if err := v.CtrClient.Connect(apiconfig.CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- // // Note: In some protocols, there is no related initiator
- // var initiatorPort = ""
- // for _, e := range host.Initiators {
- // if e.Protocol == protocol {
- // initiatorPort = e.PortName
- // break
- // }
- // }
- var initiators []*pb.Initiator
- for _, e := range host.Initiators {
- initiator := pb.Initiator{
- PortName: e.PortName,
- Protocol: e.Protocol,
- }
- initiators = append(initiators, &initiator)
- }
-
- opt := &pb.CreateVolumeAttachmentOpts{
- Id: result.Id,
- VolumeId: result.VolumeId,
- PoolId: vol.PoolId,
- AccessProtocol: protocol,
- HostInfo: &pb.HostInfo{
- OsType: host.OsType,
- Ip: host.IP,
- Host: host.HostName,
- Initiators: initiators,
- },
- Metadata: vol.Metadata,
- Context: ctx.ToJson(),
- }
-
- response, err := v.CtrClient.CreateVolumeAttachment(context.Background(), opt)
- if err != nil {
- log.Error("create volume attachment failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to create volume attachment in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (v *VolumeAttachmentPortal) ListVolumeAttachments() {
- if !policy.Authorize(v.Ctx, "volume:list_attachments") {
- return
- }
-
- m, err := v.GetParameters()
- if err != nil {
- errMsg := fmt.Sprintf("list volume attachments failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := db.C.ListVolumeAttachmentsWithFilter(c.GetContext(v.Ctx), m)
- if err != nil {
- errMsg := fmt.Sprintf("list volume attachments failed: %s", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- v.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (v *VolumeAttachmentPortal) GetVolumeAttachment() {
- if !policy.Authorize(v.Ctx, "volume:get_attachment") {
- return
- }
- id := v.Ctx.Input.Param(":attachmentId")
-
- result, err := db.C.GetVolumeAttachment(c.GetContext(v.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("volume attachment %s not found: %s", id, err.Error())
- v.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- v.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (v *VolumeAttachmentPortal) UpdateVolumeAttachment() {
- if !policy.Authorize(v.Ctx, "volume:update_attachment") {
- return
- }
- var attachment = model.VolumeAttachmentSpec{
- BaseModel: &model.BaseModel{},
- }
- id := v.Ctx.Input.Param(":attachmentId")
-
- if err := json.NewDecoder(v.Ctx.Request.Body).Decode(&attachment); err != nil {
- errMsg := fmt.Sprintf("parse volume attachment request body failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- attachment.Id = id
-
- result, err := db.C.UpdateVolumeAttachment(c.GetContext(v.Ctx), id, &attachment)
- if err != nil {
- errMsg := fmt.Sprintf("update volume attachment failed: %s", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- v.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (v *VolumeAttachmentPortal) DeleteVolumeAttachment() {
- if !policy.Authorize(v.Ctx, "volume:delete_attachment") {
- return
- }
-
- ctx := c.GetContext(v.Ctx)
- id := v.Ctx.Input.Param(":attachmentId")
- attachment, err := db.C.GetVolumeAttachment(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("volume attachment %s not found: %s", id, err.Error())
- v.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Check if attachment can be deleted
- validStatus := []string{model.VolumeAttachAvailable, model.VolumeAttachError,
- model.VolumeAttachErrorDeleting}
- if !utils.Contained(attachment.Status, validStatus) {
- errMsg := fmt.Sprintf("only the volume attachment with the status available, error, error_deleting can be deleted, the volume status is %s", attachment.Status)
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // If volume id is invalid, it would mean that volume attachment creation failed before the create method
- // in storage driver was called, and delete its db entry directly.
- vol, err := db.C.GetVolume(ctx, attachment.VolumeId)
- if err != nil {
- if err := db.C.DeleteVolumeAttachment(ctx, attachment.Id); err != nil {
- errMsg := fmt.Sprintf("failed to delete volume attachment: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- v.SuccessHandle(StatusAccepted, nil)
- return
- }
-
- host, err := db.C.GetHost(ctx, attachment.HostId)
- if err != nil {
- errMsg := fmt.Sprintf("get host failed in delete volume attachment method: %v", err)
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- attachment.Status = model.VolumeAttachDeleting
- _, err = db.C.UpdateVolumeAttachment(ctx, attachment.Id, attachment)
- if err != nil {
- errMsg := fmt.Sprintf("failed to update volume attachment: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- v.SuccessHandle(StatusAccepted, nil)
-
- // NOTE:The real volume attachment deletion process.
- // Volume attachment deletion request is sent to the Dock. Dock will delete volume attachment from database
- // or update its status to "errorDeleting" if volume connection termination failed.
- if err := v.CtrClient.Connect(apiconfig.CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- var initiators []*pb.Initiator
- for _, e := range host.Initiators {
- initiator := pb.Initiator{
- PortName: e.PortName,
- Protocol: e.Protocol,
- }
- initiators = append(initiators, &initiator)
- }
- opt := &pb.DeleteVolumeAttachmentOpts{
- Id: attachment.Id,
- VolumeId: attachment.VolumeId,
- PoolId: vol.PoolId,
- AccessProtocol: attachment.AccessProtocol,
- HostInfo: &pb.HostInfo{
- OsType: host.OsType,
- Ip: host.IP,
- Host: host.HostName,
- Initiators: initiators,
- },
- Metadata: vol.Metadata,
- Context: ctx.ToJson(),
- }
- response, err := v.CtrClient.DeleteVolumeAttachment(context.Background(), opt)
- if err != nil {
- log.Error("delete volume attachment failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to delete volume attachment in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
diff --git a/pkg/api/controllers/attachment_test.go b/pkg/api/controllers/attachment_test.go
deleted file mode 100644
index 22faaf74e..000000000
--- a/pkg/api/controllers/attachment_test.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/testutils/collection"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
-)
-
-////////////////////////////////////////////////////////////////////////////////
-// Prepare for mock server //
-////////////////////////////////////////////////////////////////////////////////
-
-func init() {
- beego.Router("/v1beta/block/attachments", &VolumeAttachmentPortal{},
- "post:CreateVolumeAttachment;get:ListVolumeAttachments")
- beego.Router("/v1beta/block/attachments/:attachmentId", &VolumeAttachmentPortal{},
- "get:GetVolumeAttachment;put:UpdateVolumeAttachment;delete:DeleteVolumeAttachment")
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Tests for volume attachment //
-////////////////////////////////////////////////////////////////////////////////
-
-func TestListVolumeAttachments(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- var sampleAttachments = []*model.VolumeAttachmentSpec{&SampleAttachments[0]}
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "volumeId": {"bd5b12a8-a101-11e7-941e-d77981b584d8"},
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListVolumeAttachmentsWithFilter", c.NewAdminContext(), m).
- Return(sampleAttachments, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/attachments?volumeId=bd5b12a8-a101-11e7-941e-d77981b584d8&offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- var output []*model.VolumeAttachmentSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, sampleAttachments)
- })
-
- t.Run("Should return 500 if list volume attachments with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "volumeId": {"bd5b12a8-a101-11e7-941e-d77981b584d8"},
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListVolumeAttachmentsWithFilter", c.NewAdminContext(), m).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET",
- "/v1beta/block/attachments?volumeId=bd5b12a8-a101-11e7-941e-d77981b584d8&offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestGetVolumeAttachment(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolumeAttachment", c.NewAdminContext(), "f2dda3d2-bf79-11e7-8665-f750b088f63e").
- Return(&SampleAttachments[0], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/attachments/f2dda3d2-bf79-11e7-8665-f750b088f63e", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- var output model.VolumeAttachmentSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &SampleAttachments[0])
- })
-
- t.Run("Should return 404 if get volume attachment with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolumeAttachment", c.NewAdminContext(), "f2dda3d2-bf79-11e7-8665-f750b088f63e").
- Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/attachments/f2dda3d2-bf79-11e7-8665-f750b088f63e", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
-
-func TestUpdateVolumeAttachment(t *testing.T) {
- var jsonStr = []byte(`{
- "id": "f2dda3d2-bf79-11e7-8665-f750b088f63e",
- "name": "fake volume attachment",
- "description": "fake volume attachment"
- }`)
- var expectedJson = []byte(`{
- "id": "f2dda3d2-bf79-11e7-8665-f750b088f63e",
- "name": "fake volume attachment",
- "description": "fake volume attachment",
- "status": "available",
- "volumeId": "bd5b12a8-a101-11e7-941e-d77981b584d8",
- "hostId": "202964b5-8e73-46fd-b41b-a8e403f3c30b",
- "connectionInfo": {
- "driverVolumeType": "iscsi",
- "data": {
- "targetDiscovered": true,
- "targetIqn": "iqn.2017-10.io.opensds:volume:00000001",
- "targetPortal": "127.0.0.0.1:3260",
- "discard": false
- }
- }
- }`)
- var expected model.VolumeAttachmentSpec
- json.Unmarshal(expectedJson, &expected)
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- attachment := model.VolumeAttachmentSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&attachment)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateVolumeAttachment", c.NewAdminContext(), attachment.Id, &attachment).
- Return(&expected, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/block/attachments/f2dda3d2-bf79-11e7-8665-f750b088f63e", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.VolumeAttachmentSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &expected)
- })
-
- t.Run("Should return 500 if update volume attachment with bad request", func(t *testing.T) {
- attachment := model.VolumeAttachmentSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&attachment)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateVolumeAttachment", c.NewAdminContext(), attachment.Id, &attachment).
- Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/block/attachments/f2dda3d2-bf79-11e7-8665-f750b088f63e", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
diff --git a/pkg/api/controllers/base.go b/pkg/api/controllers/base.go
deleted file mode 100644
index 0ed85ea47..000000000
--- a/pkg/api/controllers/base.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "net/http"
- "net/url"
- "reflect"
-
- "github.com/astaxie/beego"
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/model"
-)
-
-const (
- StatusOK = http.StatusOK
- StatusAccepted = http.StatusAccepted
-)
-
-type BasePortal struct {
- beego.Controller
-}
-
-func (b *BasePortal) GetParameters() (map[string][]string, error) {
- u, err := url.Parse(b.Ctx.Request.URL.String())
- if err != nil {
- return nil, err
- }
- m, err := url.ParseQuery(u.RawQuery)
- if err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// Filter some items in spec that no need to transfer to users.
-func (b *BasePortal) outputFilter(resp interface{}, whiteList []string) interface{} {
- v := reflect.ValueOf(resp)
- if v.Kind() == reflect.Slice {
- var s []map[string]interface{}
- for i := 0; i < v.Len(); i++ {
- m := b.doFilter(v.Index(i).Interface(), whiteList)
- s = append(s, m)
- }
- return s
- } else {
- return b.doFilter(resp, whiteList)
- }
-}
-
-func (b *BasePortal) doFilter(resp interface{}, whiteList []string) map[string]interface{} {
- v := reflect.ValueOf(resp).Elem()
- m := map[string]interface{}{}
- for _, name := range whiteList {
- field := v.FieldByName(name)
- if field.IsValid() {
- m[name] = field.Interface()
- }
- }
- return m
-}
-
-func (b *BasePortal) ErrorHandle(errType int, errMsg string) {
- var errBody []byte
-
- switch errType {
- case model.ErrorBadRequest:
- errBody = model.ErrorBadRequestStatus(errMsg)
- case model.ErrorUnauthorized:
- errBody = model.ErrorUnauthorizedStatus(errMsg)
- case model.ErrorForbidden:
- errBody = model.ErrorForbiddenStatus(errMsg)
- case model.ErrorNotFound:
- errBody = model.ErrorNotFoundStatus(errMsg)
- case model.ErrorInternalServer:
- errBody = model.ErrorInternalServerStatus(errMsg)
- default:
- errBody = model.ErrorNotImplementedStatus(errMsg)
- }
-
- b.Ctx.Output.SetStatus(errType)
- b.Ctx.Output.Header("Content-Type", "application/json; charset=utf-8")
- b.Ctx.Output.Body(errBody)
- log.Error(errMsg)
-}
-
-func (b *BasePortal) SuccessHandle(status int, body []byte) {
- b.Ctx.Output.SetStatus(status)
- b.Ctx.Output.Header("Content-Type", "application/json; charset=utf-8")
- if body != nil {
- b.Ctx.Output.Body(body)
- }
-}
diff --git a/pkg/api/controllers/dock.go b/pkg/api/controllers/dock.go
deleted file mode 100755
index 14e965034..000000000
--- a/pkg/api/controllers/dock.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS northbound service.
-
-*/
-
-package controllers
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/opensds/opensds/pkg/api/policy"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
-)
-
-// DockPortal
-type DockPortal struct {
- BasePortal
-}
-
-// ListDocks
-func (d *DockPortal) ListDocks() {
- if !policy.Authorize(d.Ctx, "dock:list") {
- return
- }
- // Call db api module to handle list docks request.
- m, err := d.GetParameters()
- if err != nil {
- errMsg := fmt.Sprintf("list docks failed: %s", err.Error())
- d.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- result, err := db.C.ListDocksWithFilter(c.GetContext(d.Ctx), m)
- if err != nil {
- errMsg := fmt.Sprintf("list docks failed: %s", err.Error())
- d.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal docks failed: %s", err.Error())
- d.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- d.SuccessHandle(StatusOK, body)
- return
-}
-
-// GetDock
-func (d *DockPortal) GetDock() {
- if !policy.Authorize(d.Ctx, "dock:get") {
- return
- }
- id := d.Ctx.Input.Param(":dockId")
- result, err := db.C.GetDock(c.GetContext(d.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("dock %s not found: %s", id, err.Error())
- d.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal dock failed: %s", err.Error())
- d.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- d.SuccessHandle(StatusOK, body)
- return
-}
diff --git a/pkg/api/controllers/dock_test.go b/pkg/api/controllers/dock_test.go
deleted file mode 100755
index 1924aa63d..000000000
--- a/pkg/api/controllers/dock_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "encoding/json"
- "errors"
- "net/http"
- "net/http/httptest"
- "reflect"
- "testing"
-
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/testutils/collection"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
-)
-
-var assertTestResult = func(t *testing.T, got, expected interface{}) {
- t.Helper()
- if !reflect.DeepEqual(got, expected) {
- t.Errorf("expected %v, got %v\n", expected, got)
- }
-}
-
-func init() {
- var dockPortal DockPortal
- beego.Router("/v1beta/docks", &dockPortal, "get:ListDocks")
- beego.Router("/v1beta/docks/:dockId", &dockPortal, "get:GetDock")
-}
-
-func TestListDocks(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- var sampleDocks = []*model.DockSpec{&SampleDocks[0]}
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListDocksWithFilter", c.NewAdminContext(), m).Return(sampleDocks, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/docks?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output []*model.DockSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, sampleDocks)
- })
-
- t.Run("Should return 500 if list docks with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListDocksWithFilter", c.NewAdminContext(), m).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/docks?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestGetDock(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetDock", c.NewAdminContext(), "b7602e18-771e-11e7-8f38-dbd6d291f4e0").Return(&SampleDocks[0], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET",
- "/v1beta/docks/b7602e18-771e-11e7-8f38-dbd6d291f4e0", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.DockSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &SampleDocks[0])
- })
-
- t.Run("Should return 404 if get docks with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetDock", c.NewAdminContext(), "b7602e18-771e-11e7-8f38-dbd6d291f4e0").Return(
- nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET",
- "/v1beta/docks/b7602e18-771e-11e7-8f38-dbd6d291f4e0", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
diff --git a/pkg/api/controllers/fileshare.go b/pkg/api/controllers/fileshare.go
deleted file mode 100644
index 202ce637c..000000000
--- a/pkg/api/controllers/fileshare.go
+++ /dev/null
@@ -1,760 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/api/policy"
- "github.com/opensds/opensds/pkg/api/util"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/controller/client"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils"
- . "github.com/opensds/opensds/pkg/utils/config"
- "github.com/opensds/opensds/pkg/utils/constants"
-)
-
-func NewFileSharePortal() *FileSharePortal {
- return &FileSharePortal{
- CtrClient: client.NewClient(),
- }
-}
-
-type FileSharePortal struct {
- BasePortal
-
- CtrClient client.Client
-}
-
-// Function to store Acl's related entry into databse
-func (f *FileSharePortal) CreateFileShareAcl() {
- if !policy.Authorize(f.Ctx, "fileshareacl:create") {
- return
- }
- ctx := c.GetContext(f.Ctx)
- var fileshareacl = model.FileShareAclSpec{
- BaseModel: &model.BaseModel{},
- }
-
- // Unmarshal the request body
- if err := json.NewDecoder(f.Ctx.Request.Body).Decode(&fileshareacl); err != nil {
- reason := fmt.Sprintf("parse fileshare access rules request body failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, reason)
- log.Error(reason)
- return
- }
-
- fileshare, err := db.C.GetFileShare(ctx, fileshareacl.FileShareId)
- if err != nil {
- reason := fmt.Sprintf("getFileshare failed in create fileshare acl: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, reason)
- log.Error(reason)
- return
- }
- // If user doesn't specified profile, using profile derived from fileshare
- if len(fileshareacl.ProfileId) == 0 {
- log.Warning("User doesn't specified profile id, using profile derived from file share")
- fileshareacl.ProfileId = fileshare.ProfileId
- }
- prf, err := db.C.GetProfile(ctx, fileshareacl.ProfileId)
- if err != nil {
- errMsg := fmt.Sprintf("get profile failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := util.CreateFileShareAclDBEntry(c.GetContext(f.Ctx), &fileshareacl)
- if err != nil {
- reason := fmt.Sprintf("createFileshareAcldbentry failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, reason)
- log.Error(reason)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- reason := fmt.Sprintf("marshal fileshare access rules created result failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, reason)
- log.Error(reason)
- return
- }
- f.SuccessHandle(StatusAccepted, body)
-
- // FileShare acl access creation request is sent to dock and drivers
- if err := f.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.CreateFileShareAclOpts{
- Id: result.Id,
- FileshareId: result.FileShareId,
- Description: result.Description,
- Type: result.Type,
- AccessCapability: result.AccessCapability,
- AccessTo: result.AccessTo,
- Metadata: fileshare.Metadata,
- Context: ctx.ToJson(),
- Profile: prf.ToJson(),
- }
- response, err := f.CtrClient.CreateFileShareAcl(context.Background(), opt)
- if err != nil {
- log.Error("create file share acl failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to create file share acl in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (f *FileSharePortal) ListFileSharesAcl() {
- if !policy.Authorize(f.Ctx, "fileshareacl:list") {
- return
- }
- m, err := f.GetParameters()
- if err != nil {
- errMsg := fmt.Sprintf("list fileshares failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- result, err := db.C.ListFileSharesAclWithFilter(c.GetContext(f.Ctx), m)
- if err != nil {
- errMsg := fmt.Sprintf("list fileshares failed: %s", err.Error())
- f.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
- // Marshal the result.
- body, _ := json.Marshal(result)
- f.SuccessHandle(StatusOK, body)
-
- return
-}
-
-// Function to store fileshare related entry into databse
-func (f *FileSharePortal) CreateFileShare() {
- if !policy.Authorize(f.Ctx, "fileshare:create") {
- return
- }
- ctx := c.GetContext(f.Ctx)
- var fileshare = model.FileShareSpec{
- BaseModel: &model.BaseModel{},
- }
-
- // Unmarshal the request body
- if err := json.NewDecoder(f.Ctx.Request.Body).Decode(&fileshare); err != nil {
- reason := fmt.Sprintf("parse fileshare request body failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, reason)
- log.Error(reason)
- return
- }
-
- // make the fileshare name global so that we can use at multiple place
- var fileshareMetadata map[string]string
- fileshareMetadata = fileshare.Metadata
- var snapshotName string
-
- // Validate the snapthot existance
- if fileshare.SnapshotId != "" {
- snapshot, _err := db.C.GetFileShareSnapshot(ctx, fileshare.SnapshotId)
- if _err != nil {
- errMsg := fmt.Sprintf("give valid snapshotId %s. %s", fileshare.SnapshotId, _err.Error())
- f.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
- existingFs, _err := db.C.GetFileShare(ctx, snapshot.FileShareId)
- if _err != nil {
- errMsg := fmt.Sprintf("This snapshot %s is not associated with any filesystem.", fileshare.SnapshotId)
- f.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
- fileshareMetadata = existingFs.Metadata
- snapshotName = snapshot.Name
- }
-
- // Get profile
- var prf *model.ProfileSpec
- var err error
- if fileshare.ProfileId == "" {
- log.Warning("Use default profile when user doesn't specify profile.")
- prf, err = db.C.GetDefaultProfileFileShare(ctx)
- if err != nil {
- errMsg := fmt.Sprintf("get profile failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- fileshare.ProfileId = prf.Id
- } else {
- prf, err = db.C.GetProfile(ctx, fileshare.ProfileId)
- if err != nil {
- errMsg := fmt.Sprintf("get profile failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- if prf.StorageType != constants.File {
- errMsg := fmt.Sprintf("storageType should be only file. Currently it is: %s", prf.StorageType)
- log.Error(errMsg)
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- }
-
- // NOTE: It will create a file share entry into the database and initialize its status
- // as "creating". It will not wait for the real file share creation to complete
- // and will return result immediately.
- result, err := util.CreateFileShareDBEntry(c.GetContext(f.Ctx), &fileshare)
- if err != nil {
- reason := fmt.Sprintf("create fileshare failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, reason)
- log.Error(reason)
- return
- }
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- reason := fmt.Sprintf("marshal fileshare created result failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, reason)
- log.Error(reason)
- return
- }
- f.SuccessHandle(StatusAccepted, body)
-
- // NOTE: The real file share creation process.
- // FileShare creation request is sent to the Dock. Dock will update file share status to "available"
- // after file share creation is completed.
- if err := f.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.CreateFileShareOpts{
- Id: result.Id,
- Name: result.Name,
- Description: result.Description,
- Size: result.Size,
- AvailabilityZone: result.AvailabilityZone,
- Profile: prf.ToJson(),
- PoolId: result.PoolId,
- ExportLocations: result.ExportLocations,
- SnapshotId: result.SnapshotId,
- SnapshotName: snapshotName,
- Metadata: fileshareMetadata,
- Context: ctx.ToJson(),
- }
- response, err := f.CtrClient.CreateFileShare(context.Background(), opt)
- if err != nil {
- log.Error("create file share failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to create file share in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (f *FileSharePortal) ListFileShares() {
- if !policy.Authorize(f.Ctx, "fileshare:list") {
- return
- }
- m, err := f.GetParameters()
- if err != nil {
- errMsg := fmt.Sprintf("list fileshares failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- result, err := db.C.ListFileSharesWithFilter(c.GetContext(f.Ctx), m)
- if err != nil {
- errMsg := fmt.Sprintf("list fileshares failed: %s", err.Error())
- f.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
- // Marshal the result.
- body, _ := json.Marshal(result)
- f.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (f *FileSharePortal) GetFileShareAcl() {
- if !policy.Authorize(f.Ctx, "fileshareacl:get") {
- return
- }
- id := f.Ctx.Input.Param(":aclId")
-
- // Call db api module to handle get fileshare request.
- result, err := db.C.GetFileShareAcl(c.GetContext(f.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("fileshare acl %s not found: %s", id, err.Error())
- f.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- if err != nil {
- reason := fmt.Sprintf("marshal fileshare acl list result failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, reason)
- log.Error(reason)
- return
- }
- f.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (f *FileSharePortal) GetFileShare() {
- if !policy.Authorize(f.Ctx, "fileshare:get") {
- return
- }
- id := f.Ctx.Input.Param(":fileshareId")
-
- // Call db api module to handle get file share request.
- result, err := db.C.GetFileShare(c.GetContext(f.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("fileshare %s not found: %s", id, err.Error())
- f.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- if err != nil {
- reason := fmt.Sprintf("marshal fileshare list result failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, reason)
- log.Error(reason)
- return
- }
- f.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (f *FileSharePortal) UpdateFileShare() {
- if !policy.Authorize(f.Ctx, "fileshare:update") {
- return
- }
- var fshare = model.FileShareSpec{
- BaseModel: &model.BaseModel{},
- }
-
- id := f.Ctx.Input.Param(":fileshareId")
- if err := json.NewDecoder(f.Ctx.Request.Body).Decode(&fshare); err != nil {
- errMsg := fmt.Sprintf("parse fileshare request body failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- fshare.Id = id
- result, err := db.C.UpdateFileShare(c.GetContext(f.Ctx), &fshare)
- if err != nil {
- errMsg := fmt.Sprintf("update fileshare failed: %s", err.Error())
- f.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- f.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (f *FileSharePortal) DeleteFileShareAcl() {
- if !policy.Authorize(f.Ctx, "fileshareacl:delete") {
- return
- }
- ctx := c.GetContext(f.Ctx)
-
- id := f.Ctx.Input.Param(":aclId")
- acl, err := db.C.GetFileShareAcl(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("fileshare acl %s not found: %s", id, err.Error())
- f.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
- fileshare, err := db.C.GetFileShare(ctx, acl.FileShareId)
- if err != nil {
- errMsg := fmt.Sprintf("fileshare for the acl %s not found: %s", id, err.Error())
- f.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
- prf, err := db.C.GetProfile(ctx, fileshare.ProfileId)
- if err != nil {
- errMsg := fmt.Sprintf("get profile failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // NOTE: It will update the the status of the file share acl waiting for deletion
- // in the database to "deleting" and return the result immediately.
- if err = util.DeleteFileShareAclDBEntry(ctx, acl); err != nil {
- errMsg := fmt.Sprintf("delete fileshare acl failed: %v", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- f.SuccessHandle(StatusAccepted, nil)
-
- // NOTE: The real file share deletion process.
- // File Share deletion request is sent to the Dock. Dock will delete file share from driver
- // and database or update file share status to "errorDeleting" if deletion from driver failed.
- if err := f.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.DeleteFileShareAclOpts{
- Id: acl.Id,
- FileshareId: acl.FileShareId,
- Description: acl.Description,
- Type: acl.Type,
- AccessCapability: acl.AccessCapability,
- AccessTo: acl.AccessTo,
- Metadata: utils.MergeStringMaps(fileshare.Metadata, acl.Metadata),
- Context: ctx.ToJson(),
- Profile: prf.ToJson(),
- }
- response, err := f.CtrClient.DeleteFileShareAcl(context.Background(), opt)
- if err != nil {
- log.Error("delete fileshare acl failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to delete fileshare acl in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (f *FileSharePortal) DeleteFileShare() {
- if !policy.Authorize(f.Ctx, "fileshare:delete") {
- return
- }
- ctx := c.GetContext(f.Ctx)
-
- id := f.Ctx.Input.Param(":fileshareId")
- fileshare, err := db.C.GetFileShare(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("fileshare %s not found: %s", id, err.Error())
- f.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
- prf, err := db.C.GetProfile(ctx, fileshare.ProfileId)
- if err != nil {
- errMsg := fmt.Sprintf("delete file share failed: %v", err.Error())
- f.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // If profileId or poolId of the file share doesn't exist, it would mean that
- // the file share provisioning operation failed before the create method in
- // storage driver was called, therefore the file share entry should be deleted
- // from db directly.
- if fileshare.ProfileId == "" || fileshare.PoolId == "" {
- if err := db.C.DeleteFileShare(ctx, fileshare.Id); err != nil {
- errMsg := fmt.Sprintf("delete file share failed: %v", err.Error())
- f.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
- f.SuccessHandle(StatusAccepted, nil)
- return
- }
-
- // NOTE: It will update the the status of the file share waiting for deletion in
- // the database to "deleting" and return the result immediately.
- if err = util.DeleteFileShareDBEntry(ctx, fileshare); err != nil {
- errMsg := fmt.Sprintf("delete fileshare failed: %v", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- f.SuccessHandle(StatusAccepted, nil)
-
- // NOTE: The real file share deletion process.
- // File Share deletion request is sent to the Dock. Dock will delete file share from driver
- // and database or update file share status to "errorDeleting" if deletion from driver failed.
- if err := f.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.DeleteFileShareOpts{
- Id: fileshare.Id,
- PoolId: fileshare.PoolId,
- Metadata: fileshare.Metadata,
- Context: ctx.ToJson(),
- Profile: prf.ToJson(),
- ExportLocations: fileshare.ExportLocations,
- }
- response, err := f.CtrClient.DeleteFileShare(context.Background(), opt)
- if err != nil {
- log.Error("delete fileshare failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to delete fileshare in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func NewFileShareSnapshotPortal() *FileShareSnapshotPortal {
- return &FileShareSnapshotPortal{
- CtrClient: client.NewClient(),
- }
-}
-
-type FileShareSnapshotPortal struct {
- BasePortal
-
- CtrClient client.Client
-}
-
-func (f *FileShareSnapshotPortal) CreateFileShareSnapshot() {
- if !policy.Authorize(f.Ctx, "snapshot:create") {
- return
- }
- ctx := c.GetContext(f.Ctx)
- var snapshot = model.FileShareSnapshotSpec{
- BaseModel: &model.BaseModel{},
- }
-
- if err := json.NewDecoder(f.Ctx.Request.Body).Decode(&snapshot); err != nil {
- errMsg := fmt.Sprintf("parse fileshare snapshot request body failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- fileshare, err := db.C.GetFileShare(ctx, snapshot.FileShareId)
- if err != nil {
- errMsg := fmt.Sprintf("fileshare %s not found: %s", snapshot.FileShareId, err.Error())
- f.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
- snapshot.ShareSize = fileshare.Size
- // Usually snapshot.SnapshotSize and fileshare.Size are equal, even if they
- // are not equal, then snapshot.SnapshotSize will be updated to the correct value.
- snapshot.SnapshotSize = fileshare.Size
-
- if len(snapshot.ProfileId) == 0 {
- log.Warning("User doesn't specified profile id, using profile derived form fileshare")
- snapshot.ProfileId = fileshare.ProfileId
- }
-
- // Get profile
- prf, err := db.C.GetProfile(ctx, snapshot.ProfileId)
- if err != nil {
- errMsg := fmt.Sprintf("get profile failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // NOTE:It will create a fileshare snapshot entry into the database and initialize its status
- // as "creating". It will not wait for the real fileshare snapshot creation to complete
- // and will return result immediately.
- result, err := util.CreateFileShareSnapshotDBEntry(ctx, &snapshot)
- if err != nil {
- errMsg := fmt.Sprintf("create fileshare snapshot failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- f.SuccessHandle(StatusAccepted, body)
-
- // NOTE:The real file share snapshot creation process.
- // FileShare snapshot creation request is sent to the Dock. Dock will update file share snapshot status to "available"
- // after file share snapshot creation complete.
- if err := f.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.CreateFileShareSnapshotOpts{
- Id: result.Id,
- Name: result.Name,
- Description: result.Description,
- FileshareId: result.FileShareId,
- Size: result.ShareSize,
- Context: ctx.ToJson(),
- Metadata: result.Metadata,
- Profile: prf.ToJson(),
- }
- response, err := f.CtrClient.CreateFileShareSnapshot(context.Background(), opt)
- if err != nil {
- log.Error("create file share snapthot failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to create file share snapshot in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (f *FileShareSnapshotPortal) ListFileShareSnapshots() {
- if !policy.Authorize(f.Ctx, "snapshot:list") {
- return
- }
- m, err := f.GetParameters()
- if err != nil {
- errMsg := fmt.Sprintf("list fileshare snapshots failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := db.C.ListFileShareSnapshotsWithFilter(c.GetContext(f.Ctx), m)
- if err != nil {
- errMsg := fmt.Sprintf("list fileshare snapshots failed: %s", err.Error())
- f.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- f.SuccessHandle(StatusOK, body)
- return
-}
-
-func (f *FileShareSnapshotPortal) GetFileShareSnapshot() {
- if !policy.Authorize(f.Ctx, "snapshot:get") {
- return
- }
- id := f.Ctx.Input.Param(":snapshotId")
-
- result, err := db.C.GetFileShareSnapshot(c.GetContext(f.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("fileshare snapshot %s not found: %s", id, err.Error())
- f.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- f.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (f *FileShareSnapshotPortal) UpdateFileShareSnapshot() {
- if !policy.Authorize(f.Ctx, "snapshot:update") {
- return
- }
- var snapshot = model.FileShareSnapshotSpec{
- BaseModel: &model.BaseModel{},
- }
-
- id := f.Ctx.Input.Param(":snapshotId")
- if err := json.NewDecoder(f.Ctx.Request.Body).Decode(&snapshot); err != nil {
- errMsg := fmt.Sprintf("parse fileshare snapshot request body failed: %s", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- snapshot.Id = id
-
- result, err := db.C.UpdateFileShareSnapshot(c.GetContext(f.Ctx), id, &snapshot)
- if err != nil {
- errMsg := fmt.Sprintf("update fileshare snapshot failed: %s", err.Error())
- f.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- f.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (f *FileShareSnapshotPortal) DeleteFileShareSnapshot() {
- if !policy.Authorize(f.Ctx, "snapshot:delete") {
- return
- }
- ctx := c.GetContext(f.Ctx)
- id := f.Ctx.Input.Param(":snapshotId")
-
- snapshot, err := db.C.GetFileShareSnapshot(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("fileshare snapshot %s not found: %s", id, err.Error())
- f.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- prf, err := db.C.GetProfile(ctx, snapshot.ProfileId)
- if err != nil {
- errMsg := fmt.Sprintf("profile (%s) not found: %v", snapshot.ProfileId, err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // NOTE: It will update the the status of the file share snapshot waiting for deletion in
- // the database to "deleting" and return the result immediately.
- err = util.DeleteFileShareSnapshotDBEntry(ctx, snapshot)
- if err != nil {
- errMsg := fmt.Sprintf("delete file share snapshot in db failed: %v", err.Error())
- f.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- f.Ctx.Output.SetStatus(StatusAccepted)
-
- // NOTE:The real file share snapshot deletion process.
- // FileShare snapshot deletion request is sent to the Dock. Dock will delete file share snapshot from driver and
- // database or update its status to "errorDeleting" if file share snapshot deletion from driver failed.
- if err := f.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.DeleteFileShareSnapshotOpts{
- Id: snapshot.Id,
- FileshareId: snapshot.FileShareId,
- Context: ctx.ToJson(),
- Profile: prf.ToJson(),
- Metadata: snapshot.Metadata,
- }
- response, err := f.CtrClient.DeleteFileShareSnapshot(context.Background(), opt)
- if err != nil {
- log.Error("delete file share snapshot failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to delete file share snapshot in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
diff --git a/pkg/api/controllers/fileshare_test.go b/pkg/api/controllers/fileshare_test.go
deleted file mode 100644
index 03cd8fbf6..000000000
--- a/pkg/api/controllers/fileshare_test.go
+++ /dev/null
@@ -1,370 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "bytes"
- ctx "context"
- "encoding/json"
- "errors"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- . "github.com/opensds/opensds/testutils/collection"
- ctrtest "github.com/opensds/opensds/testutils/controller/testing"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
-)
-
-////////////////////////////////////////////////////////////////////////////////
-// Prepare for mock server //
-////////////////////////////////////////////////////////////////////////////////
-func init() {
- beego.Router("/v1beta/file/shares", NewFakeFileSharePortal(),
- "post:CreateFileShare;get:ListFileShares")
- beego.Router("/v1beta/file/shares/:fileshareId", NewFakeFileSharePortal(),
- "get:GetFileShare;put:UpdateFileShare;delete:DeleteFileShare")
-
- beego.Router("/v1beta/file/snapshots", &FileShareSnapshotPortal{},
- "post:CreateFileShareSnapshot;get:ListFileShareSnapshots")
- beego.Router("/v1beta/file/snapshots/:snapshotId", &FileShareSnapshotPortal{},
- "get:GetFileShareSnapshot;put:UpdateFileShareSnapshot;delete:DeleteFileShareSnapshot")
-
-}
-
-func NewFakeFileSharePortal() *FileSharePortal {
- mockClient := new(ctrtest.Client)
-
- mockClient.On("Connect", "localhost:50049").Return(nil)
- mockClient.On("Close").Return(nil)
- mockClient.On("CreateFileShare", ctx.Background(), &pb.CreateFileShareOpts{
- Context: c.NewAdminContext().ToJson(),
- }).Return(&pb.GenericResponse{}, nil)
- mockClient.On("DeleteFileShare", ctx.Background(), &pb.DeleteFileShareOpts{
- Context: c.NewAdminContext().ToJson(),
- }).Return(&pb.GenericResponse{}, nil)
-
- return &FileSharePortal{
- CtrClient: mockClient,
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Tests for FileShare //
-////////////////////////////////////////////////////////////////////////////////
-
-var (
- fakeFileShare = &model.FileShareSpec{
- BaseModel: &model.BaseModel{
- Id: "f4a5e666-c669-4c64-a2a1-8f9ecd560c78",
- CreatedAt: "2017-10-24T16:21:32",
- },
- Name: "fake FileShare",
- Description: "fake FileShare",
- Size: 99,
- AvailabilityZone: "unknown",
- Status: "available",
- PoolId: "831fa5fb-17cf-4410-bec6-1f4b06208eef",
- ProfileId: "d3a109ff-3e51-4625-9054-32604c79fa90",
- }
- fakeFileShares = []*model.FileShareSpec{fakeFileShare}
-)
-
-func TestListFileShares(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- var sampleFileShares = []*model.FileShareSpec{&SampleFileShares[0], &SampleFileShares[1]}
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListFileSharesWithFilter", c.NewAdminContext(), m).Return(sampleFileShares, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/file/shares?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- var output []*model.FileShareSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, sampleFileShares)
- })
-
- t.Run("Should return 500 if list file share with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListFileSharesWithFilter", c.NewAdminContext(), m).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/file/shares?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestGetFileShare(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", c.NewAdminContext(), "bd5b12a8-a101-11e7-941e-d77981b584d8").Return(&SampleFileShares[0], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/file/shares/bd5b12a8-a101-11e7-941e-d77981b584d8", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- var output model.FileShareSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, &output, &SampleFileShares[0])
- })
-
- t.Run("Should return 404 if get file share replication with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", c.NewAdminContext(), "bd5b12a8-a101-11e7-941e-d77981b584d8").Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/file/shares/bd5b12a8-a101-11e7-941e-d77981b584d8", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
-
-func TestUpdateFileShare(t *testing.T) {
- var jsonStr = []byte(`{
- "id": "bd5b12a8-a101-11e7-941e-d77981b584d8",
- "name":"fake FileShare",
- "description":"fake Fileshare"
- }`)
- var expectedJson = []byte(`{
- "id": "bd5b12a8-a101-11e7-941e-d77981b584d8",
- "name": "fake FileShare",
- "description": "fake FileShare",
- "size": 1,
- "status": "available",
- "poolId": "084bf71e-a102-11e7-88a8-e31fe6d52248",
- "profileId": "1106b972-66ef-11e7-b172-db03f3689c9c"
- }`)
- var expected model.FileShareSpec
- json.Unmarshal(expectedJson, &expected)
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- fileshare := model.FileShareSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&fileshare)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateFileShare", c.NewAdminContext(), &fileshare).Return(&expected, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/file/shares/bd5b12a8-a101-11e7-941e-d77981b584d8", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.FileShareSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &expected)
- })
-
- t.Run("Should return 500 if update file share with bad request", func(t *testing.T) {
- fileshare := model.FileShareSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&fileshare)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateFileShare", c.NewAdminContext(), &fileshare).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/file/shares/bd5b12a8-a101-11e7-941e-d77981b584d8", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Tests for fileshare snapshot //
-////////////////////////////////////////////////////////////////////////////////
-
-func TestListFileShareSnapshots(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- var sampleSnapshots = []*model.FileShareSnapshotSpec{&SampleFileShareSnapshots[0], &SampleFileShareSnapshots[1]}
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListFileShareSnapshotsWithFilter", c.NewAdminContext(), m).Return(sampleSnapshots, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/file/snapshots?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- var output []*model.FileShareSnapshotSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, sampleSnapshots)
- })
-
- t.Run("Should return 500 if list fileshare snapshots with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListFileShareSnapshotsWithFilter", c.NewAdminContext(), m).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/file/snapshots?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestGetFileShareSnapshot(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShareSnapshot", c.NewAdminContext(), "3769855c-a102-11e7-b772-17b880d2f537").Return(&SampleFileShareSnapshots[0], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/file/snapshots/3769855c-a102-11e7-b772-17b880d2f537", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.FileShareSnapshotSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &SampleFileShareSnapshots[0])
- })
-
- t.Run("Should return 404 if get fileshare group with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShareSnapshot", c.NewAdminContext(), "3769855c-a102-11e7-b772-17b880d2f537").Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/file/snapshots/3769855c-a102-11e7-b772-17b880d2f537", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
-
-func TestUpdateFileShareSnapshot(t *testing.T) {
- var jsonStr = []byte(`{
- "id": "3769855c-a102-11e7-b772-17b880d2f537",
- "name":"fake snapshot",
- "description":"fake snapshot"
- }`)
- var expectedJson = []byte(`{
- "id": "3769855c-a102-11e7-b772-17b880d2f537",
- "name": "fake snapshot",
- "description": "fake snapshot",
- "size": 1,
- "status": "available",
- "fileshareId": "bd5b12a8-a101-11e7-941e-d77981b584d8"
- }`)
- var expected model.FileShareSnapshotSpec
- json.Unmarshal(expectedJson, &expected)
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- snapshot := model.FileShareSnapshotSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&snapshot)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateFileShareSnapshot", c.NewAdminContext(), snapshot.Id, &snapshot).
- Return(&expected, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/file/snapshots/3769855c-a102-11e7-b772-17b880d2f537", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.FileShareSnapshotSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &expected)
- })
-
- t.Run("Should return 500 if update fileshare snapshot with bad request", func(t *testing.T) {
- snapshot := model.FileShareSnapshotSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&snapshot)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateFileShareSnapshot", c.NewAdminContext(), snapshot.Id, &snapshot).
- Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/file/snapshots/3769855c-a102-11e7-b772-17b880d2f537", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
diff --git a/pkg/api/controllers/host.go b/pkg/api/controllers/host.go
deleted file mode 100644
index 8a8854a74..000000000
--- a/pkg/api/controllers/host.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS northbound service.
-
-*/
-
-package controllers
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/opensds/opensds/pkg/api/policy"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
-)
-
-type HostPortal struct {
- BasePortal
-}
-
-func NewHostPortal() *HostPortal {
- return &HostPortal{}
-}
-
-func (p *HostPortal) ListHosts() {
- if !policy.Authorize(p.Ctx, "host:list") {
- return
- }
-
- m, err := p.GetParameters()
- if err != nil {
- errMsg := fmt.Sprintf("get the query parameters of host failed: %s", err.Error())
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- hosts, err := db.C.ListHosts(c.GetContext(p.Ctx), m)
- if err != nil {
- errMsg := fmt.Sprintf("list hosts failed: %s", err.Error())
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- body, err := json.Marshal(hosts)
- if err != nil {
- errMsg := fmt.Sprintf("marshal hosts failed: %s", err.Error())
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, body)
- return
-}
-
-func (p *HostPortal) CreateHost() {
- if !policy.Authorize(p.Ctx, "host:create") {
- return
- }
-
- var host = model.HostSpec{
- BaseModel: &model.BaseModel{},
- }
-
- // Unmarshal the request body
- if err := json.NewDecoder(p.Ctx.Request.Body).Decode(&host); err != nil {
- errMsg := fmt.Sprintf("parse host request body failed: %s", err.Error())
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // HostName should be unique in the system
- hostArr, err := db.C.ListHostsByName(c.GetContext(p.Ctx), host.HostName)
- if err != nil {
- errMsg := fmt.Sprintf("check host %s failed in CreateHost method: %v", host.HostName, err)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- if len(hostArr) > 0 {
- errMsg := fmt.Sprintf("the host with name %s already exists in the system", host.HostName)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := db.C.CreateHost(c.GetContext(p.Ctx), &host)
- if err != nil {
- errMsg := fmt.Sprintf("create host failed: %v", err)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal host created result failed: %s", err.Error())
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, body)
- return
-}
-
-func (p *HostPortal) GetHost() {
- if !policy.Authorize(p.Ctx, "host:get") {
- return
- }
- id := p.Ctx.Input.Param(":hostId")
- result, err := db.C.GetHost(c.GetContext(p.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("host %s not found: %s", id, err.Error())
- p.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal host failed: %s", err.Error())
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, body)
- return
-}
-
-func (p *HostPortal) UpdateHost() {
- if !policy.Authorize(p.Ctx, "host:update") {
- return
- }
-
- id := p.Ctx.Input.Param(":hostId")
- var host = model.HostSpec{
- BaseModel: &model.BaseModel{
- Id: id,
- },
- }
- if err := json.NewDecoder(p.Ctx.Request.Body).Decode(&host); err != nil {
- errMsg := fmt.Sprintf("parse host request body failed: %v", err)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // TODO: Add parameter validation
-
- result, err := db.C.UpdateHost(c.GetContext(p.Ctx), &host)
- if err != nil {
- errMsg := fmt.Sprintf("update host failed: %v", err)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal host updated result failed: %v", err)
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, body)
- return
-}
-
-func (p *HostPortal) DeleteHost() {
- if !policy.Authorize(p.Ctx, "host:delete") {
- return
- }
- id := p.Ctx.Input.Param(":hostId")
- host, err := db.C.GetHost(c.GetContext(p.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("host %s not found: %s", id, err.Error())
- p.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Check relationship with volume
- attachments, err := db.C.ListVolumeAttachmentsWithFilter(c.GetContext(p.Ctx), map[string][]string{"hostId": []string{id}})
- if err != nil {
- errMsg := fmt.Sprintf("list attachments failed in DeleteHost method: %v", err)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- if len(attachments) > 0 {
- errMsg := fmt.Sprintf("some volumes are attached to host: %s, please detach them first", host.HostName)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- err = db.C.DeleteHost(c.GetContext(p.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("delete host failed: %v", err)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, nil)
- return
-}
diff --git a/pkg/api/controllers/host_test.go b/pkg/api/controllers/host_test.go
deleted file mode 100644
index 2a74527ed..000000000
--- a/pkg/api/controllers/host_test.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "bytes"
- "encoding/json"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/astaxie/beego/context"
- c "github.com/opensds/opensds/pkg/context"
-
- "github.com/astaxie/beego"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/testutils/collection"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
-)
-
-func init() {
- var hostPortal HostPortal
- beego.Router("/v1beta/host/hosts", &hostPortal, "get:ListHosts;post:CreateHost")
- beego.Router("/v1beta/host/hosts/:hostId", &hostPortal, "get:GetHost;put:UpdateHost;delete:DeleteHost")
-}
-
-var (
- ByteHostReq = []byte(`
- {
- "accessMode": "agentless",
- "hostName": "sap1",
- "ip": "192.168.56.12",
- "availabilityZones": [
- "default",
- "az2"
- ],
- "initiators": [
- {
- "portName": "20000024ff5bb888",
- "protocol": "iscsi"
- },
- {
- "portName": "20000024ff5bc999",
- "protocol": "iscsi"
- }
- ]
- }`)
-
- hostReq = model.HostSpec{
- BaseModel: &model.BaseModel{},
- AccessMode: "agentless",
- HostName: "sap1",
- IP: "192.168.56.12",
- AvailabilityZones: []string{"default", "az2"},
- Initiators: []*model.Initiator{
- &model.Initiator{
- PortName: "20000024ff5bb888",
- Protocol: "iscsi",
- },
- &model.Initiator{
- PortName: "20000024ff5bc999",
- Protocol: "iscsi",
- },
- },
- }
-)
-
-func TestCreateHost(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- fakeHost := &SampleHosts[0]
-
- mockClient := new(dbtest.Client)
- mockClient.On("CreateHost", c.NewAdminContext(), &hostReq).Return(fakeHost, nil)
- mockClient.On("ListHostsByName", c.NewAdminContext(), hostReq.HostName).Return(nil, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("POST", "/v1beta/host/hosts", bytes.NewBuffer(ByteHostReq))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- var output model.HostSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, fakeHost)
-
- })
-}
-
-func TestListHosts(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- fakeHosts := []*model.HostSpec{&SampleHosts[0], &SampleHosts[1]}
- mockClient := new(dbtest.Client)
- mockClient.On("ListHosts", c.NewAdminContext(), map[string][]string{}).Return(fakeHosts, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/host/hosts", nil)
- w := httptest.NewRecorder()
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output []*model.HostSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, fakeHosts)
- })
-}
-
-func TestGetHost(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- fakeHost := &SampleHosts[0]
- mockClient := new(dbtest.Client)
- mockClient.On("GetHost", c.NewAdminContext(), fakeHost.Id).Return(fakeHost, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/host/hosts/"+SampleHosts[0].Id, nil)
- w := httptest.NewRecorder()
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.HostSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, fakeHost)
- })
-}
-
-func TestUpdateHost(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- fakeHost := &SampleHosts[0]
-
- var fakeHostUpdateReq model.HostSpec
- tmp, _ := json.Marshal(&hostReq)
- json.Unmarshal(tmp, &fakeHostUpdateReq)
- fakeHostUpdateReq.Id = fakeHost.Id
-
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateHost", c.NewAdminContext(), &fakeHostUpdateReq).Return(fakeHost, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/host/hosts/"+fakeHost.Id, bytes.NewBuffer(ByteHostReq))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- var output model.HostSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, fakeHost)
-
- })
-}
-
-func TestDeleteHost(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- fakeHost := &SampleHosts[0]
- mockClient := new(dbtest.Client)
- mockClient.On("DeleteHost", c.NewAdminContext(), fakeHost.Id).Return(nil)
- mockClient.On("GetHost", c.NewAdminContext(), fakeHost.Id).Return(fakeHost, nil)
- mockClient.On("ListVolumeAttachmentsWithFilter", c.NewAdminContext(),
- map[string][]string{"hostId": []string{fakeHost.Id}}).Return(nil, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("DELETE", "/v1beta/host/hosts/"+fakeHost.Id, nil)
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- assertTestResult(t, w.Code, 200)
-
- })
-}
diff --git a/pkg/api/controllers/metrics.go b/pkg/api/controllers/metrics.go
deleted file mode 100755
index e1b4206c8..000000000
--- a/pkg/api/controllers/metrics.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS northbound service.
-
-*/
-
-package controllers
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
- "os/exec"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/api/policy"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/controller/client"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- . "github.com/opensds/opensds/pkg/utils/config"
-)
-
-// prometheus constants
-var PrometheusConfHome string
-var PrometheusUrl string
-var PrometheusConfFile string
-
-// alert manager constants
-var AlertmgrConfHome string
-var AlertmgrUrl string
-var AlertmgrConfFile string
-
-var GrafanaConfHome string
-var GrafanaRestartCmd string
-var GrafanaConfFile string
-
-var ReloadPath string
-var BackupExtension string
-
-func init() {
-
- ReloadPath = CONF.OsdsApiServer.ConfReloadUrl
- BackupExtension = ".bak"
-
- PrometheusConfHome = CONF.OsdsApiServer.PrometheusConfHome
- PrometheusUrl = CONF.OsdsApiServer.PrometheusUrl
- PrometheusConfFile = CONF.OsdsApiServer.PrometheusConfFile
-
- AlertmgrConfHome = CONF.OsdsApiServer.AlertmgrConfHome
- AlertmgrUrl = CONF.OsdsApiServer.AlertMgrUrl
- AlertmgrConfFile = CONF.OsdsApiServer.AlertmgrConfFile
-
- GrafanaConfHome = CONF.OsdsApiServer.GrafanaConfHome
- GrafanaRestartCmd = CONF.OsdsApiServer.GrafanaRestartCmd
- GrafanaConfFile = CONF.OsdsApiServer.GrafanaConfFile
-}
-
-func NewMetricsPortal() *MetricsPortal {
- return &MetricsPortal{
- CtrClient: client.NewClient(),
- }
-}
-
-type MetricsPortal struct {
- BasePortal
-
- CtrClient client.Client
-}
-
-func (m *MetricsPortal) GetMetrics() {
- if !policy.Authorize(m.Ctx, "metrics:get") {
- return
- }
- ctx := c.GetContext(m.Ctx)
- var getMetricSpec = model.GetMetricSpec{
- BaseModel: &model.BaseModel{},
- }
-
- // Unmarshal the request body
- if err := json.NewDecoder(m.Ctx.Request.Body).Decode(&getMetricSpec); err != nil {
- errMsg := fmt.Sprintf("parse get metric request body failed: %s", err.Error())
- m.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- if err := m.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.GetMetricsOpts{
- InstanceId: getMetricSpec.InstanceId,
- MetricName: getMetricSpec.MetricName,
- StartTime: getMetricSpec.StartTime,
- EndTime: getMetricSpec.EndTime,
- Context: ctx.ToJson(),
- }
- res, err := m.CtrClient.GetMetrics(context.Background(), opt)
- if err != nil {
- log.Error("collect metrics failed in controller service:", err)
- return
- }
-
- m.SuccessHandle(StatusOK, []byte(res.GetResult().GetMessage()))
-
- return
-}
-
-func (m *MetricsPortal) UploadConfFile() {
-
- if !policy.Authorize(m.Ctx, "metrics:uploadconf") {
- return
- }
- params, _ := m.GetParameters()
- confType := params["conftype"][0]
-
- switch confType {
- case "prometheus":
- DoUpload(m, PrometheusConfHome, PrometheusUrl, ReloadPath, true)
- case "alertmanager":
- DoUpload(m, AlertmgrConfHome, AlertmgrUrl, ReloadPath, true)
- case "grafana":
- // for grafana, there is no reload endpoint to call
- DoUpload(m, GrafanaConfHome, "", "", false)
- // to reload the configuration, run the reload command for grafana
- cmd := exec.Command("systemctl", "restart", GrafanaRestartCmd)
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- err := cmd.Run()
- if err != nil {
- log.Fatalf("restart grafana failed with %s\n", err)
- }
- return
-
- }
-}
-
-func DoUpload(metricsPortal *MetricsPortal, confHome string, url string, reloadPath string, toCallReloadEndpoint bool) {
-
- // get the uploaded file
- f, h, _ := metricsPortal.GetFile("conf_file")
-
- // get the path to save the configuration
- path := confHome + h.Filename
-
- // close the incoming file
- fCloseErr := f.Close()
- if fCloseErr != nil {
- log.Errorf("error closing uploaded file %s", h.Filename)
- metricsPortal.ErrorHandle(model.ErrorInternalServer, fCloseErr.Error())
- return
- }
-
- // backup the current configuration file
- _, currErr := os.Stat(path)
-
- // make backup path
- backupPath := path + BackupExtension
-
- if currErr == nil {
- // current configuration exists, back it up
- fRenameErr := os.Rename(path, backupPath)
- if fRenameErr != nil {
- log.Errorf("error renaming file %s to %s", path, backupPath)
- metricsPortal.ErrorHandle(model.ErrorInternalServer, fRenameErr.Error())
- return
- }
- }
-
- // save file to disk
- fSaveErr := metricsPortal.SaveToFile("conf_file", path)
- if fSaveErr != nil {
- log.Errorf("error saving file %s", path)
- } else {
- if toCallReloadEndpoint == true {
- reloadResp, reloadErr := http.Post(url+reloadPath, "application/json", nil)
- if reloadErr != nil {
- log.Errorf("error on reload of configuration %s", reloadErr)
- metricsPortal.ErrorHandle(model.ErrorInternalServer, reloadErr.Error())
- return
- }
- respBody, readBodyErr := ioutil.ReadAll(reloadResp.Body)
- if readBodyErr != nil {
- log.Errorf("error on reload of configuration %s", reloadErr)
- metricsPortal.ErrorHandle(model.ErrorInternalServer, readBodyErr.Error())
- return
- }
- metricsPortal.SuccessHandle(StatusOK, respBody)
- return
- }
- metricsPortal.SuccessHandle(StatusOK, nil)
- return
- }
-}
-
-func (m *MetricsPortal) DownloadConfFile() {
-
- if !policy.Authorize(m.Ctx, "metrics:downloadconf") {
- return
- }
- params, _ := m.GetParameters()
- confType := params["conftype"][0]
-
- switch confType {
- case "prometheus":
- DoDownload(m, PrometheusConfHome, PrometheusConfFile)
- case "alertmanager":
- DoDownload(m, AlertmgrConfHome, AlertmgrConfFile)
- case "grafana":
- DoDownload(m, GrafanaConfHome, GrafanaConfFile)
- }
-}
-
-func DoDownload(metricsPortal *MetricsPortal, confHome string, confFile string) {
- // get the path to the configuration file
- path := confHome + confFile
- // check, if file exists
- _, currErr := os.Stat(path)
- if currErr != nil && os.IsNotExist(currErr) {
- log.Errorf("file %s not found", path)
- metricsPortal.ErrorHandle(model.ErrorNotFound, currErr.Error())
- return
- }
- // file exists, download it
- metricsPortal.Ctx.Output.Download(path, path)
-}
-
-func (m *MetricsPortal) CollectMetrics() {
- if !policy.Authorize(m.Ctx, "metrics:collect") {
- return
- }
- ctx := c.GetContext(m.Ctx)
- var collMetricSpec = model.CollectMetricSpec{
- BaseModel: &model.BaseModel{},
- }
-
- // Unmarshal the request body
- if err := json.NewDecoder(m.Ctx.Request.Body).Decode(&collMetricSpec); err != nil {
- errMsg := fmt.Sprintf("parse collect metric request body failed: %s", err.Error())
- m.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // connect to the dock to collect metrics from the driver
- if err := m.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Errorf("error when connecting controller client: %s", err.Error())
- return
- }
-
- opt := &pb.CollectMetricsOpts{
- DriverName: collMetricSpec.DriverType,
- Context: ctx.ToJson(),
- }
-
- res, err := m.CtrClient.CollectMetrics(context.Background(), opt)
-
- if err != nil {
- log.Errorf("collect metrics failed in controller service: %s", err.Error())
- return
- }
-
- body, _ := json.Marshal(res)
- m.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (m *MetricsPortal) GetUrls() {
- if !policy.Authorize(m.Ctx, "metrics:urls") {
- return
- }
-
- if err := m.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.NoParams{}
- res, err := m.CtrClient.GetUrls(context.Background(), opt)
-
- if err != nil {
- log.Error("get urls failed in controller service:", err)
- return
- }
-
- m.SuccessHandle(StatusOK, []byte(res.GetResult().GetMessage()))
-
- return
-}
diff --git a/pkg/api/controllers/pool.go b/pkg/api/controllers/pool.go
deleted file mode 100755
index e764f0cbf..000000000
--- a/pkg/api/controllers/pool.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS northbound service.
-
-*/
-
-package controllers
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/opensds/opensds/pkg/api/policy"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
-)
-
-type PoolPortal struct {
- BasePortal
-}
-
-func (p *PoolPortal) ListAvailabilityZones() {
- if !policy.Authorize(p.Ctx, "availability_zone:list") {
- return
- }
- azs, err := db.C.ListAvailabilityZones(c.GetContext(p.Ctx))
- if err != nil {
- errMsg := fmt.Sprintf("get AvailabilityZones for pools failed: %s", err.Error())
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- body, err := json.Marshal(azs)
- if err != nil {
- errMsg := fmt.Sprintf("marshal AvailabilityZones failed: %s", err.Error())
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, body)
- return
-}
-
-func (p *PoolPortal) ListPools() {
- if !policy.Authorize(p.Ctx, "pool:list") {
- return
- }
- // Call db api module to handle list pools request.
- m, err := p.GetParameters()
- if err != nil {
- errMsg := fmt.Sprintf("list pool parameters failed: %s", err.Error())
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := db.C.ListPoolsWithFilter(c.GetContext(p.Ctx), m)
- if err != nil {
- errMsg := fmt.Sprintf("list pools failed: %s", err.Error())
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal pools failed: %s", err.Error())
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, body)
- return
-}
-
-func (p *PoolPortal) GetPool() {
- if !policy.Authorize(p.Ctx, "pool:get") {
- return
- }
- id := p.Ctx.Input.Param(":poolId")
- result, err := db.C.GetPool(c.GetContext(p.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("pool %s not found: %s", id, err.Error())
- p.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal pool failed: %s", err.Error())
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, body)
- return
-}
diff --git a/pkg/api/controllers/pool_test.go b/pkg/api/controllers/pool_test.go
deleted file mode 100755
index 6f2632178..000000000
--- a/pkg/api/controllers/pool_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "encoding/json"
- "errors"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/astaxie/beego"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/testutils/collection"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
-)
-
-func init() {
- var poolPortal PoolPortal
- beego.Router("/v1beta/pools", &poolPortal, "get:ListPools")
- beego.Router("/v1beta/availabilityZones", &poolPortal, "get:ListAvailabilityZones")
- beego.Router("/v1beta/pools/:poolId", &poolPortal, "get:GetPool")
-}
-
-func TestListAvailabilityZones(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("ListAvailabilityZones", c.NewAdminContext()).Return(SampleAvailabilityZones, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/availabilityZones", nil)
- w := httptest.NewRecorder()
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output []string
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, SampleAvailabilityZones)
- })
-}
-
-func TestListPools(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- var samplePools = []*model.StoragePoolSpec{&SamplePools[0], &SamplePools[1]}
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListPoolsWithFilter", c.NewAdminContext(), m).Return(samplePools, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/pools?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output []*model.StoragePoolSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, samplePools)
- })
-
- t.Run("Should return 500 if list pools with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListPoolsWithFilter", c.NewAdminContext(), m).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/pools?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestGetPool(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetPool", c.NewAdminContext(), "f4486139-78d5-462d-a7b9-fdaf6c797e1b").Return(&SamplePools[0], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/pools/f4486139-78d5-462d-a7b9-fdaf6c797e1b", nil)
- w := httptest.NewRecorder()
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.StoragePoolSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &SamplePools[0])
- })
-
- t.Run("Should return 404 if get docks with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetPool", c.NewAdminContext(), "f4486139-78d5-462d-a7b9-fdaf6c797e1b").Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET",
- "/v1beta/pools/f4486139-78d5-462d-a7b9-fdaf6c797e1b", nil)
- w := httptest.NewRecorder()
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
diff --git a/pkg/api/controllers/profile.go b/pkg/api/controllers/profile.go
deleted file mode 100755
index 9c9efa091..000000000
--- a/pkg/api/controllers/profile.go
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS northbound service.
-
-*/
-
-package controllers
-
-import (
- "encoding/json"
- "fmt"
- "strings"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/api/policy"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/constants"
-)
-
-type ProfilePortal struct {
- BasePortal
-}
-
-func (p *ProfilePortal) CreateProfile() {
- if !policy.Authorize(p.Ctx, "profile:create") {
- return
- }
-
- var profile = model.ProfileSpec{
- BaseModel: &model.BaseModel{},
- }
-
- // Unmarshal the request body
- if err := json.NewDecoder(p.Ctx.Request.Body).Decode(&profile); err != nil {
- errMsg := fmt.Sprintf("parse profile request body failed: %v", err)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Validate StorageType is block or file
- stype := strings.ToLower(profile.StorageType)
- switch stype {
- case constants.Block:
- break
- case constants.File:
- pp := profile.ProvisioningProperties
- if ds := pp.DataStorage; ds.IsEmpty() {
- if len(ds.StorageAccessCapability) == 0 {
- profile.ProvisioningProperties.DataStorage.StorageAccessCapability = []string{"Read", "Write", "Execute"}
- }
- }
- if io := pp.IOConnectivity; io.IsEmpty() {
- if io.AccessProtocol == "" {
- profile.ProvisioningProperties.IOConnectivity.AccessProtocol = "nfs"
- }
- }
- default:
- errMsg := fmt.Sprintf("parse profile request body failed: %v is invalid storagetype", stype)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Call db api module to handle create profile request.
- result, err := db.C.CreateProfile(c.GetContext(p.Ctx), &profile)
- if err != nil {
- errMsg := fmt.Sprintf("create profile failed: %v", err)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal profile created result failed: %v", err)
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, body)
- return
-}
-
-func (p *ProfilePortal) ListProfiles() {
- if !policy.Authorize(p.Ctx, "profile:list") {
- return
- }
-
- m, err := p.GetParameters()
- if err != nil {
- errMsg := fmt.Sprintf("list profiles failed: %v", err)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := db.C.ListProfilesWithFilter(c.GetContext(p.Ctx), m)
- if err != nil {
- errMsg := fmt.Sprintf("list profiles failed: %v", err)
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal profiles listed result failed: %v", err)
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, body)
- return
-}
-
-func (p *ProfilePortal) GetProfile() {
- if !policy.Authorize(p.Ctx, "profile:get") {
- return
- }
- id := p.Ctx.Input.Param(":profileId")
-
- result, err := db.C.GetProfile(c.GetContext(p.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("profile %s not found: %v", id, err)
- p.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal profile got result failed: %v", err)
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, body)
- return
-}
-
-func (p *ProfilePortal) UpdateProfile() {
-
- if !policy.Authorize(p.Ctx, "profile:update") {
- return
- }
- var profile = model.ProfileSpec{
- BaseModel: &model.BaseModel{},
- }
- id := p.Ctx.Input.Param(":profileId")
-
- if err := json.NewDecoder(p.Ctx.Request.Body).Decode(&profile); err != nil {
- errMsg := fmt.Sprintf("parse profile request body failed: %v", err)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := db.C.UpdateProfile(c.GetContext(p.Ctx), id, &profile)
- if err != nil {
- errMsg := fmt.Sprintf("update profiles failed: %v", err)
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal profile updated result failed: %v", err)
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, body)
- return
-}
-
-func (p *ProfilePortal) DeleteProfile() {
-
- if !policy.Authorize(p.Ctx, "profile:delete") {
- return
- }
- id := p.Ctx.Input.Param(":profileId")
- ctx := c.GetContext(p.Ctx)
- profile, err := db.C.GetProfile(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("profile %s not found: %v", id, err)
- p.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Check the depedency before deletion of profile
- // If no dependency then only allow user to delete profile
- // 1. Check the volumes created through that profile
- // 2. Check the fileshares created through that profile
- if profile.StorageType == constants.Block {
- vols, err := db.C.ListVolumesByProfileId(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("failed to fetch volumes for specified profile: %v", err)
- p.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
- if len(vols) > 0 {
- errMsg := fmt.Sprintf("There are dependent volumes : %v for the specified profile %v", vols, id)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- } else {
- fileshares, err := db.C.ListFileSharesByProfileId(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("failed to fetch fileshares for specified profileId: %v", err)
- p.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
- if len(fileshares) > 0 {
- errMsg := fmt.Sprintf("There are dependent fileshares : %v for the specified profile %v", fileshares, id)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- }
-
- log.V(5).Infof("There are no dependecies on the specified profile, so deleting : %v", profile)
- err = db.C.DeleteProfile(ctx, profile.Id)
- if err != nil {
- errMsg := fmt.Sprintf("delete profiles failed: %v", err)
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, nil)
- return
-}
-
-func (p *ProfilePortal) AddCustomProperty() {
-
- if !policy.Authorize(p.Ctx, "profile:add_custom_property") {
- return
- }
- var custom model.CustomPropertiesSpec
- id := p.Ctx.Input.Param(":profileId")
-
- if err := json.NewDecoder(p.Ctx.Request.Body).Decode(&custom); err != nil {
- errMsg := fmt.Sprintf("parse custom properties request body failed: %v", err)
- p.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := db.C.AddCustomProperty(c.GetContext(p.Ctx), id, custom)
- if err != nil {
- errMsg := fmt.Sprintf("add custom property failed: %v", err)
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal custom property added result failed: %v", err)
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, body)
- return
-}
-
-func (p *ProfilePortal) ListCustomProperties() {
-
- if !policy.Authorize(p.Ctx, "profile:list_custom_properties") {
- return
- }
- id := p.Ctx.Input.Param(":profileId")
-
- result, err := db.C.ListCustomProperties(c.GetContext(p.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("list custom properties failed: %v", err)
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal custom properties listed result failed: %v", err)
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, body)
- return
-}
-
-func (p *ProfilePortal) RemoveCustomProperty() {
-
- if !policy.Authorize(p.Ctx, "profile:remove_custom_property") {
- return
- }
- id := p.Ctx.Input.Param(":profileId")
- customKey := p.Ctx.Input.Param(":customKey")
-
- if err := db.C.RemoveCustomProperty(c.GetContext(p.Ctx), id, customKey); err != nil {
- errMsg := fmt.Sprintf("remove custom property failed: %v", err)
- p.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- p.SuccessHandle(StatusOK, nil)
- return
-}
diff --git a/pkg/api/controllers/profile_test.go b/pkg/api/controllers/profile_test.go
deleted file mode 100755
index af7728446..000000000
--- a/pkg/api/controllers/profile_test.go
+++ /dev/null
@@ -1,643 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "net/http"
- "net/http/httptest"
- "strings"
- "testing"
-
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/testutils/collection"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
-)
-
-func init() {
- var profilePortal ProfilePortal
- beego.Router("/v1beta/profiles", &profilePortal, "post:CreateProfile;get:ListProfiles")
- beego.Router("/v1beta/profiles/:profileId", &profilePortal, "get:GetProfile;put:UpdateProfile;delete:DeleteProfile")
- beego.Router("/v1beta/profiles/:profileId/customProperties", &profilePortal, "post:AddCustomProperty;get:ListCustomProperties")
- beego.Router("/v1beta/profiles/:profileId/customProperties/:customKey", &profilePortal, "delete:RemoveCustomProperty")
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Tests for profile //
-////////////////////////////////////////////////////////////////////////////////
-
-func TestCreateProfile(t *testing.T) {
- var fakeBody = `{
- "name": "default",
- "description": "default policy",
- "storageType": "block",
- "customProperties": {
- "dataStorage": {
- "provisioningPolicy": "Thin",
- "compression": true,
- "deduplication": true
- },
- "ioConnectivity": {
- "accessProtocol": "rbd",
- "maxIOPS": 5000000,
- "maxBWS": 500,
- "minIOPS": 1000000,
- "minBWS": 100,
- "latency": 100
- }
- }
- }`
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("CreateProfile", c.NewAdminContext(), &model.ProfileSpec{
- BaseModel: &model.BaseModel{},
- Name: "default",
- Description: "default policy",
- StorageType: "block",
- CustomProperties: model.CustomPropertiesSpec{
- "dataStorage": map[string]interface{}{
- "provisioningPolicy": "Thin",
- "compression": true,
- "deduplication": true,
- },
- "ioConnectivity": map[string]interface{}{
- "accessProtocol": "rbd",
- "maxIOPS": float64(5000000),
- "maxBWS": float64(500),
- "minIOPS": float64(1000000),
- "minBWS": float64(100),
- "latency": float64(100),
- },
- }}).Return(&SampleProfiles[1], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("POST", "/v1beta/profiles", strings.NewReader(fakeBody))
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.ProfileSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &SampleProfiles[1])
- })
-}
-
-func TestUpdateProfile(t *testing.T) {
- var jsonStr = []byte(`{
- "id": "2f9c0a04-66ef-11e7-ade2-43158893e017",
- "name": "silver",
- "description": "silver policy"
- }`)
- var expectedJson = []byte(`{
- "id": "2f9c0a04-66ef-11e7-ade2-43158893e017",
- "name": "silver",
- "description": "silver policy",
- "customProperties": {
- "dataStorage": {
- "provisioningPolicy": "Thin",
- "compression": true,
- "deduplication": true
- },
- "ioConnectivity": {
- "accessProtocol": "rbd",
- "maxIOPS": 5000000,
- "maxBWS": 500,
- "minIOPS": 1000000,
- "minBWS": 100,
- "latency": 100
- }
- }
- }`)
- var expected model.ProfileSpec
- json.Unmarshal(expectedJson, &expected)
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- profile := model.ProfileSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&profile)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateProfile", c.NewAdminContext(), profile.Id, &profile).
- Return(&expected, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.ProfileSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &expected)
- })
-
- t.Run("Should return 500 if update profile with bad request", func(t *testing.T) {
- profile := model.ProfileSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&profile)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateProfile", c.NewAdminContext(), profile.Id, &profile).
- Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestListProfiles(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- var sampleProfiles = []*model.ProfileSpec{&SampleProfiles[1]}
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListProfilesWithFilter", c.NewAdminContext(), m).Return(
- sampleProfiles, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/profiles?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output []*model.ProfileSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, sampleProfiles)
- })
-
- t.Run("Should return 500 if list profiles with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListProfilesWithFilter", c.NewAdminContext(), m).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/profiles?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestGetProfile(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetProfile", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").
- Return(&SampleProfiles[1], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.ProfileSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &SampleProfiles[1])
- })
-
- t.Run("Should return 404 if get profile with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetProfile", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(
- nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET",
- "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
-
-func TestDeleteProfile(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetProfile", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(
- &SampleProfiles[1], nil)
- mockClient.On("ListVolumesByProfileId", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(
- SampleVolumeNames, nil)
- mockClient.On("DeleteProfile", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("DELETE",
- "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 200)
- })
-
- t.Run("Should return 404 if delete profile with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetProfile", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(
- nil, errors.New("Invalid resource uuid"))
- mockClient.On("ListVolumesByProfileId", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(
- nil, errors.New("Depency volumes"))
- db.C = mockClient
-
- r, _ := http.NewRequest("DELETE",
- "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Tests for file share profile //
-////////////////////////////////////////////////////////////////////////////////
-
-func TestFileShareCreateProfile(t *testing.T) {
- var fakeBody = `{
- "name": "silver",
- "description": "silver policy",
- "storageType": "file",
- "provisioningProperties":{
- "dataStorage":{
- "storageAccessCapability": ["Read","Write","Execute"],
- "provisioningPolicy": "Thin",
- "compression": true,
- "deduplication": true
- },
- "ioConnectivity": {
- "accessProtocol": "NFS",
- "maxIOPS": 5000000,
- "maxBWS": 500,
- "minIOPS": 1000000,
- "minBWS": 100,
- "latency": 100
- }
- }
- }`
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("CreateProfile", c.NewAdminContext(), &model.ProfileSpec{
- BaseModel: &model.BaseModel{},
- Name: "silver",
- Description: "silver policy",
- StorageType: "file",
- ProvisioningProperties: model.ProvisioningPropertiesSpec{
- DataStorage: model.DataStorageLoS{
- StorageAccessCapability: []string{"Read", "Write", "Execute"},
- ProvisioningPolicy: "Thin",
- Compression: true,
- Deduplication: true,
- },
- IOConnectivity: model.IOConnectivityLoS{
- AccessProtocol: "NFS",
- MaxIOPS: 5000000,
- MaxBWS: 500,
- MinIOPS: 1000000,
- MinBWS: 100,
- Latency: 100,
- },
- }}).Return(&SampleFileShareProfiles[1], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("POST", "/v1beta/profiles", strings.NewReader(fakeBody))
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.ProfileSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &SampleFileShareProfiles[1])
- })
-}
-func TestFileShareUpdateProfile(t *testing.T) {
- var jsonStr = []byte(`{
- "id": "2f9c0a04-66ef-11e7-ade2-43158893e017",
- "name": "silver",
- "description": "silver policy"
- }`)
- var expectedJson = []byte(`{
- "id": "2f9c0a04-66ef-11e7-ade2-43158893e017",
- "name": "silver",
- "description": "silver policy",
- "storageType": "file",
- "provisioningProperties":{
- "dataStorage":{
- "storageAccessCapability": ["Read","Write","Execute"],
- "provisioningPolicy": "Thin",
- "compression": true,
- "deduplication": true
- },
- "ioConnectivity": {
- "accessProtocol": "NFS",
- "maxIOPS": 5000000,
- "maxBWS": 500,
- "minIOPS": 1000000,
- "minBWS": 100,
- "latency": 100
- }
- }
- }`)
- var expected model.ProfileSpec
- json.Unmarshal(expectedJson, &expected)
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- profile := model.ProfileSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&profile)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateProfile", c.NewAdminContext(), profile.Id, &profile).
- Return(&expected, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.ProfileSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &expected)
- })
-
- t.Run("Should return 500 if update profile with bad request", func(t *testing.T) {
- profile := model.ProfileSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&profile)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateProfile", c.NewAdminContext(), profile.Id, &profile).
- Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestListFileShareProfiles(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- var sampleProfiles = []*model.ProfileSpec{&SampleFileShareProfiles[1]}
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListProfilesWithFilter", c.NewAdminContext(), m).Return(
- sampleProfiles, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/profiles?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output []*model.ProfileSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, sampleProfiles)
- })
-
- t.Run("Should return 500 if list profiles with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListProfilesWithFilter", c.NewAdminContext(), m).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/profiles?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestGetFileShareProfile(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetProfile", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").
- Return(&SampleFileShareProfiles[1], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.ProfileSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &SampleFileShareProfiles[1])
- })
-
- t.Run("Should return 404 if get profile with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetProfile", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(
- nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET",
- "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
-
-func TestDeleteFileShareProfile(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetProfile", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(
- &SampleFileShareProfiles[1], nil)
- mockClient.On("ListFileSharesByProfileId", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(
- SampleShareNames, nil)
- mockClient.On("DeleteProfile", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("DELETE",
- "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 200)
- })
-
- t.Run("Should return 404 if delete profile with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetProfile", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(
- nil, errors.New("Invalid resource uuid"))
- mockClient.On("ListFileSharesByProfileId", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(
- nil, errors.New("Depency FileShares"))
- db.C = mockClient
-
- r, _ := http.NewRequest("DELETE",
- "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Tests for profile custom properties spec //
-////////////////////////////////////////////////////////////////////////////////
-
-func TestAddCustomProperty(t *testing.T) {
- var fakeBody = `{
- "dataStorage": {
- "provisioningPolicy": "Thin",
- "compression": true,
- "deduplication": true
- }
- }`
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("AddCustomProperty", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017", model.CustomPropertiesSpec{
- "dataStorage": map[string]interface{}{
- "provisioningPolicy": "Thin",
- "compression": true, "deduplication": true}}).Return(&SampleCustomProperties, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("POST", "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017/customProperties", strings.NewReader(fakeBody))
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.CustomPropertiesSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &SampleCustomProperties)
- })
-}
-
-func TestListCustomProperties(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("ListCustomProperties", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(
- &SampleCustomProperties, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017/customProperties", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.CustomPropertiesSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &SampleCustomProperties)
- })
-
- t.Run("Should return 500 if list custom properties with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("ListCustomProperties", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017").Return(
- nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017/customProperties", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestRemoveCustomProperty(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("RemoveCustomProperty", c.NewAdminContext(), "2f9c0a04-66ef-11e7-ade2-43158893e017", "key1").Return(nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("DELETE",
- "/v1beta/profiles/2f9c0a04-66ef-11e7-ade2-43158893e017/customProperties/key1", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- assertTestResult(t, w.Code, 200)
- })
-}
diff --git a/pkg/api/controllers/replication.go b/pkg/api/controllers/replication.go
deleted file mode 100755
index 0928d16ce..000000000
--- a/pkg/api/controllers/replication.go
+++ /dev/null
@@ -1,462 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/api/policy"
- "github.com/opensds/opensds/pkg/api/util"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/controller/client"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- . "github.com/opensds/opensds/pkg/utils/config"
-)
-
-func NewReplicationPortal() *ReplicationPortal {
- return &ReplicationPortal{
- CtrClient: client.NewClient(),
- }
-}
-
-type ReplicationPortal struct {
- BasePortal
-
- CtrClient client.Client
-}
-
-var whiteListSimple = []string{"Id", "Name", "ReplicationStatus"}
-var whiteList = []string{"Id", "CreatedAt", "UpdatedAt", "Name", "Description", "AvailabilityZone", "ReplicationStatus",
- "PrimaryVolumeId", "SecondaryVolumeId", "PrimaryReplicationDriverData", "SecondaryReplicationDriverData",
- "ReplicationMode", "ReplicationPeriod", "ProfileId", "Metadata"}
-
-func (r *ReplicationPortal) CreateReplication() {
- if !policy.Authorize(r.Ctx, "replication:create") {
- return
- }
- ctx := c.GetContext(r.Ctx)
- var replication = &model.ReplicationSpec{
- BaseModel: &model.BaseModel{},
- }
-
- // Unmarshal the request body
- if err := json.NewDecoder(r.Ctx.Request.Body).Decode(replication); err != nil {
- errMsg := fmt.Sprintf("parse replication request body failed: %s", err.Error())
- r.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := util.CreateReplicationDBEntry(ctx, replication)
- if err != nil {
- errMsg := fmt.Sprintf("create volume replication failed: %s", err.Error())
- r.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal replication created result failed: %s", err.Error())
- r.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
- r.SuccessHandle(StatusAccepted, body)
-
- // NOTE:The real volume replication creation process.
- // Volume replication creation request is sent to the Dock. Dock will update volume status to "available"
- // after volume replication creation is completed.
- if err = r.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.CreateReplicationOpts{
- Id: result.Id,
- Name: result.Name,
- Description: result.Description,
- PrimaryVolumeId: result.PrimaryVolumeId,
- SecondaryVolumeId: result.SecondaryVolumeId,
- AvailabilityZone: result.AvailabilityZone,
- ProfileId: result.ProfileId,
- Context: ctx.ToJson(),
- }
- response, err := r.CtrClient.CreateReplication(context.Background(), opt)
- if err != nil {
- log.Error("create volume replication failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to create volume replication in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (r *ReplicationPortal) ListReplications() {
- if !policy.Authorize(r.Ctx, "replication:list") {
- return
- }
-
- // Call db api module to handle list replications request.
- params, err := r.GetParameters()
- if err != nil {
- errMsg := fmt.Sprintf("list replications failed: %s", err.Error())
- r.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := db.C.ListReplicationWithFilter(c.GetContext(r.Ctx), params)
- if err != nil {
- errMsg := fmt.Sprintf("list replications failed: %s", err.Error())
- r.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(r.outputFilter(result, whiteListSimple))
- if err != nil {
- errMsg := fmt.Sprintf("marshal replications listed result failed: %s", err.Error())
- r.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- r.SuccessHandle(StatusOK, body)
- return
-
-}
-
-func (r *ReplicationPortal) ListReplicationsDetail() {
- if !policy.Authorize(r.Ctx, "replication:list_detail") {
- return
- }
-
- // Call db api module to handle list replications request.
- params, err := r.GetParameters()
- if err != nil {
- errMsg := fmt.Sprintf("list replications detail failed: %s", err.Error())
- r.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := db.C.ListReplicationWithFilter(c.GetContext(r.Ctx), params)
- if err != nil {
- errMsg := fmt.Sprintf("list replications detail failed: %s", err.Error())
- r.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal replications detail listed result failed: %s", err.Error())
- r.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- r.SuccessHandle(StatusOK, body)
- return
-}
-
-func (r *ReplicationPortal) GetReplication() {
- if !policy.Authorize(r.Ctx, "replication:get") {
- return
- }
-
- id := r.Ctx.Input.Param(":replicationId")
- // Call db api module to handle get volume request.
- result, err := db.C.GetReplication(c.GetContext(r.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("get replication failed: %s", err.Error())
- r.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(r.outputFilter(result, whiteList))
- if err != nil {
- errMsg := fmt.Sprintf("marshal replication showed result failed: %s", err.Error())
- r.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- r.SuccessHandle(StatusOK, body)
- return
-}
-
-func (r *ReplicationPortal) UpdateReplication() {
- if !policy.Authorize(r.Ctx, "replication:update") {
- return
- }
- var mr = model.ReplicationSpec{
- BaseModel: &model.BaseModel{},
- }
-
- id := r.Ctx.Input.Param(":replicationId")
- if err := json.NewDecoder(r.Ctx.Request.Body).Decode(&mr); err != nil {
- errMsg := fmt.Sprintf("parse replication request body failed: %s", err.Error())
- r.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- if mr.ProfileId != "" {
- if _, err := db.C.GetProfile(c.GetContext(r.Ctx), mr.ProfileId); err != nil {
- errMsg := fmt.Sprintf("get profile failed: %s", err.Error())
- r.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
- // TODO:compare with the original profile_id to get the differences
- }
-
- result, err := db.C.UpdateReplication(c.GetContext(r.Ctx), id, &mr)
- if err != nil {
- errMsg := fmt.Sprintf("update replication failed: %s", err.Error())
- r.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal replication updated result failed: %s", err.Error())
- r.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- r.SuccessHandle(StatusOK, body)
- return
-}
-
-func (r *ReplicationPortal) DeleteReplication() {
- if !policy.Authorize(r.Ctx, "replication:delete") {
- return
- }
- ctx := c.GetContext(r.Ctx)
-
- id := r.Ctx.Input.Param(":replicationId")
- rep, err := db.C.GetReplication(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("get replication failed: %s", err.Error())
- r.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- if err := util.DeleteReplicationDBEntry(ctx, rep); err != nil {
- r.ErrorHandle(model.ErrorBadRequest, err.Error())
- return
- }
- r.SuccessHandle(StatusAccepted, nil)
-
- // NOTE:The real volume replication deletion process.
- // Volume replication deletion request is sent to the Dock. Dock will remove
- // replicaiton record after volume replication creation is completed.
- if err = r.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.DeleteReplicationOpts{
- Id: rep.Id,
- PrimaryVolumeId: rep.PrimaryVolumeId,
- SecondaryVolumeId: rep.SecondaryVolumeId,
- AvailabilityZone: rep.AvailabilityZone,
- ProfileId: rep.ProfileId,
- Metadata: rep.Metadata,
- Context: ctx.ToJson(),
- }
- response, err := r.CtrClient.DeleteReplication(context.Background(), opt)
- if err != nil {
- log.Error("delete volume replication failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to delete volume replication in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (r *ReplicationPortal) EnableReplication() {
- if !policy.Authorize(r.Ctx, "replication:enable") {
- return
- }
- ctx := c.GetContext(r.Ctx)
-
- id := r.Ctx.Input.Param(":replicationId")
- rep, err := db.C.GetReplication(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("get replication failed: %s", err.Error())
- r.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- if err := util.EnableReplicationDBEntry(ctx, rep); err != nil {
- r.ErrorHandle(model.ErrorBadRequest, err.Error())
- return
- }
- r.SuccessHandle(StatusAccepted, nil)
-
- // NOTE:The real volume replication enable process.
- // Volume replication enable request is sent to the Dock. Dock will set
- // volume replication status to 'available' after volume replication enable
- // operation is completed.
- if err = r.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.EnableReplicationOpts{
- Id: rep.Id,
- PrimaryVolumeId: rep.PrimaryVolumeId,
- SecondaryVolumeId: rep.SecondaryVolumeId,
- AvailabilityZone: rep.AvailabilityZone,
- ProfileId: rep.ProfileId,
- Metadata: rep.Metadata,
- Context: ctx.ToJson(),
- }
- response, err := r.CtrClient.EnableReplication(context.Background(), opt)
- if err != nil {
- log.Error("enable volume replication failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to enable volume replication in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (r *ReplicationPortal) DisableReplication() {
- if !policy.Authorize(r.Ctx, "replication:disable") {
- return
- }
- ctx := c.GetContext(r.Ctx)
-
- id := r.Ctx.Input.Param(":replicationId")
- rep, err := db.C.GetReplication(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("get replication failed: %s", err.Error())
- r.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- if err := util.DisableReplicationDBEntry(ctx, rep); err != nil {
- r.ErrorHandle(model.ErrorBadRequest, err.Error())
- return
- }
- r.SuccessHandle(StatusAccepted, nil)
-
- // NOTE:The real volume replication disable process.
- // Volume replication diable request is sent to the Dock. Dock will set
- // volume replication status to 'available' after volume replication disable
- // operation is completed.
- if err = r.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.DisableReplicationOpts{
- Id: rep.Id,
- PrimaryVolumeId: rep.PrimaryVolumeId,
- SecondaryVolumeId: rep.SecondaryVolumeId,
- AvailabilityZone: rep.AvailabilityZone,
- ProfileId: rep.ProfileId,
- Metadata: rep.Metadata,
- Context: ctx.ToJson(),
- }
- response, err := r.CtrClient.DisableReplication(context.Background(), opt)
- if err != nil {
- log.Error("disable volume replication failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to disable volume replication in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (r *ReplicationPortal) FailoverReplication() {
- if !policy.Authorize(r.Ctx, "replication:failover") {
- return
- }
- ctx := c.GetContext(r.Ctx)
-
- var failover = model.FailoverReplicationSpec{}
- if err := json.NewDecoder(r.Ctx.Request.Body).Decode(&failover); err != nil {
- errMsg := fmt.Sprintf("parse replication request body failed: %s", err.Error())
- r.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- id := r.Ctx.Input.Param(":replicationId")
- rep, err := db.C.GetReplication(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("get replication failed: %s", err.Error())
- r.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- if err := util.FailoverReplicationDBEntry(ctx, rep, failover.SecondaryBackendId); err != nil {
- r.ErrorHandle(model.ErrorBadRequest, err.Error())
- return
- }
- r.SuccessHandle(StatusAccepted, nil)
-
- // NOTE:The real volume replication failover process.
- // Volume replication failover request is sent to the Dock. Dock will set
- // volume replication status to 'available' after volume replication failover
- // operation is completed.
- if err = r.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.FailoverReplicationOpts{
- Id: rep.Id,
- PrimaryVolumeId: rep.PrimaryVolumeId,
- SecondaryVolumeId: rep.SecondaryVolumeId,
- AvailabilityZone: rep.AvailabilityZone,
- ProfileId: rep.ProfileId,
- Metadata: rep.Metadata,
- AllowAttachedVolume: failover.AllowAttachedVolume,
- SecondaryBackendId: failover.SecondaryBackendId,
- Context: ctx.ToJson(),
- }
- response, err := r.CtrClient.FailoverReplication(context.Background(), opt)
- if err != nil {
- log.Error("failover volume replication failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to failover volume replication in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
diff --git a/pkg/api/controllers/replication_test.go b/pkg/api/controllers/replication_test.go
deleted file mode 100755
index b882d8517..000000000
--- a/pkg/api/controllers/replication_test.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/testutils/collection"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
-)
-
-func init() {
- beego.Router("/v1beta/block/replications", NewReplicationPortal(),
- "post:CreateReplication;get:ListReplications")
- beego.Router("/v1beta/block/replications/detail", NewReplicationPortal(),
- "get:ListReplicationsDetail")
- beego.Router("/v1beta/block/replications/:replicationId", NewReplicationPortal(),
- "get:GetReplication;put:UpdateReplication;delete:DeleteReplication")
-}
-
-func TestListReplicationsDetail(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- var sampleReplications = []*model.ReplicationSpec{&SampleReplications[0]}
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListReplicationWithFilter", c.NewAdminContext(), m).Return(sampleReplications, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/replications/detail?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output []*model.ReplicationSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, sampleReplications)
- })
-}
-
-func TestListReplications(t *testing.T) {
- var expectedJson = []byte(`[
- {
- "id": "c299a978-4f3e-11e8-8a5c-977218a83359",
- "name": "sample-replication-01"
- }
- ]`)
- var expected []*model.ReplicationSpec
- json.Unmarshal([]byte(expectedJson), &expected)
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- var sampleReplications = []*model.ReplicationSpec{&SampleReplications[0]}
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListReplicationWithFilter", c.NewAdminContext(), m).Return(sampleReplications, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/replications?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output []*model.ReplicationSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, expected)
- })
-
- t.Run("Should return 500 if list volume replications with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListReplicationWithFilter", c.NewAdminContext(), m).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/replications?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestGetReplication(t *testing.T) {
- var expectedJson = []byte(`{
- "id": "c299a978-4f3e-11e8-8a5c-977218a83359",
- "primaryVolumeId": "bd5b12a8-a101-11e7-941e-d77981b584d8",
- "secondaryVolumeId": "bd5b12a8-a101-11e7-941e-d77981b584d8",
- "name": "sample-replication-01",
- "description": "This is a sample replication for testing",
- "profileId": "1106b972-66ef-11e7-b172-db03f3689c9c"
- }`)
- var expected model.ReplicationSpec
- json.Unmarshal(expectedJson, &expected)
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetReplication", c.NewAdminContext(), "c299a978-4f3e-11e8-8a5c-977218a83359").Return(&SampleReplications[0], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/replications/c299a978-4f3e-11e8-8a5c-977218a83359", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.ReplicationSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &expected)
- })
-
- t.Run("Should return 404 if get volume replication with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetReplication", c.NewAdminContext(), "c299a978-4f3e-11e8-8a5c-977218a83359").Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/replications/c299a978-4f3e-11e8-8a5c-977218a83359", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
-
-func TestUpdateReplication(t *testing.T) {
- var jsonStr = []byte(`{
- "id": "c299a978-4f3e-11e8-8a5c-977218a83359",
- "name":"fake replication",
- "description":"fake replication"
- }`)
- var expectedJson = []byte(`{
- "id": "c299a978-4f3e-11e8-8a5c-977218a83359",
- "primaryVolumeId": "bd5b12a8-a101-11e7-941e-d77981b584d8",
- "secondaryVolumeId": "bd5b12a8-a101-11e7-941e-d77981b584d8",
- "name": "fake replication",
- "description": "fake replication",
- "poolId": "084bf71e-a102-11e7-88a8-e31fe6d52248",
- "profileId": "1106b972-66ef-11e7-b172-db03f3689c9c"
- }`)
- var expected model.ReplicationSpec
- json.Unmarshal(expectedJson, &expected)
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- replication := model.ReplicationSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&replication)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateReplication", c.NewAdminContext(), replication.Id, &replication).Return(&expected, nil)
- mockClient.On("GetProfile", c.NewAdminContext(), SampleReplications[0].ProfileId).Return(&SampleProfiles[0], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT",
- "/v1beta/block/replications/c299a978-4f3e-11e8-8a5c-977218a83359", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/json")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.ReplicationSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &expected)
- })
-
- t.Run("Should return 500 if update volume replication with bad request", func(t *testing.T) {
- replication := model.ReplicationSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&replication)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateReplication", c.NewAdminContext(), replication.Id,
- &replication).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT",
- "/v1beta/block/replications/c299a978-4f3e-11e8-8a5c-977218a83359", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/json")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
diff --git a/pkg/api/controllers/version.go b/pkg/api/controllers/version.go
deleted file mode 100755
index cc1a9b1f2..000000000
--- a/pkg/api/controllers/version.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS northbound REST service.
-
-*/
-
-package controllers
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/opensds/opensds/pkg/model"
-)
-
-// KnownVersions
-var KnownVersions = []map[string]string{
- {
- "name": "v1beta",
- "description": "v1beta version",
- "status": "CURRENT",
- "updatedAt": "2017-07-10T14:36:58.014Z",
- },
-}
-
-// VersionPortal
-type VersionPortal struct {
- BasePortal
-}
-
-// ListVersions
-func (v *VersionPortal) ListVersions() {
- body, err := json.Marshal(KnownVersions)
- if err != nil {
- errMsg := fmt.Sprintf("marshal versions failed: %s", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- v.SuccessHandle(StatusOK, body)
- return
-}
-
-// GetVersion
-func (v *VersionPortal) GetVersion() {
- apiVersion := v.Ctx.Input.Param(":apiVersion")
-
- // Find version by specified api version
- var result map[string]string
- for _, version := range KnownVersions {
- if version["name"] == apiVersion {
- result = version
- break
- }
- }
- if result == nil {
- errMsg := fmt.Sprintf("can't find the version: %s", apiVersion)
- v.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal version failed: %s", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- v.SuccessHandle(StatusOK, body)
- return
-}
diff --git a/pkg/api/controllers/version_test.go b/pkg/api/controllers/version_test.go
deleted file mode 100755
index 045ab1b5e..000000000
--- a/pkg/api/controllers/version_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "encoding/json"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
- c "github.com/opensds/opensds/pkg/context"
-)
-
-func init() {
- var versionPortal VersionPortal
- beego.Router("/", &versionPortal, "get:ListVersions")
- beego.Router("/:apiVersion", &versionPortal, "get:GetVersion")
-}
-
-func TestListVersions(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- r, _ := http.NewRequest("GET", "/", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- var output []map[string]string
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, KnownVersions)
- })
-}
-
-func TestGetVersion(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- r, _ := http.NewRequest("GET", "/v1beta", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- var output map[string]string
- json.Unmarshal(w.Body.Bytes(), &output)
- var expected = map[string]string{
- "name": "v1beta",
- "description": "v1beta version",
- "status": "CURRENT",
- "updatedAt": "2017-07-10T14:36:58.014Z",
- }
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, expected)
- })
-
- t.Run("Should return 404 if get version with invalid API version", func(t *testing.T) {
- r, _ := http.NewRequest("GET", "/InvalidAPIVersion", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
diff --git a/pkg/api/controllers/volume.go b/pkg/api/controllers/volume.go
deleted file mode 100755
index caa5d5021..000000000
--- a/pkg/api/controllers/volume.go
+++ /dev/null
@@ -1,591 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS northbound service.
-
-*/
-
-package controllers
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/api/policy"
- "github.com/opensds/opensds/pkg/api/util"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/controller/client"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- . "github.com/opensds/opensds/pkg/utils/config"
-)
-
-func NewVolumePortal() *VolumePortal {
- return &VolumePortal{
- CtrClient: client.NewClient(),
- }
-}
-
-type VolumePortal struct {
- BasePortal
-
- CtrClient client.Client
-}
-
-func (v *VolumePortal) CreateVolume() {
- if !policy.Authorize(v.Ctx, "volume:create") {
- return
- }
- ctx := c.GetContext(v.Ctx)
- var volume = model.VolumeSpec{
- BaseModel: &model.BaseModel{},
- }
-
- // Unmarshal the request body
- if err := json.NewDecoder(v.Ctx.Request.Body).Decode(&volume); err != nil {
- errMsg := fmt.Sprintf("parse volume request body failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // get profile
- var prf *model.ProfileSpec
- var err error
- if volume.ProfileId == "" {
- log.Warning("Use default profile when user doesn't specify profile.")
- prf, err = db.C.GetDefaultProfile(ctx)
- if err != nil {
- errMsg := fmt.Sprintf("get default profile failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- // Assign the default profile id to volume so that users can know which
- // profile is used for creating a volume.
- volume.ProfileId = prf.Id
- } else {
- prf, err = db.C.GetProfile(ctx, volume.ProfileId)
- }
- if err != nil {
- errMsg := fmt.Sprintf("get profile failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // NOTE:It will create a volume entry into the database and initialize its status
- // as "creating". It will not wait for the real volume creation to complete
- // and will return result immediately.
- result, err := util.CreateVolumeDBEntry(ctx, &volume)
- if err != nil {
- errMsg := fmt.Sprintf("create volume failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- log.V(8).Infof("create volume DB entry success %+v", result)
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- v.SuccessHandle(StatusAccepted, body)
-
- // NOTE:The real volume creation process.
- // Volume creation request is sent to the Dock. Dock will update volume status to "available"
- // after volume creation is completed.
- if err := v.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.CreateVolumeOpts{
- Id: result.Id,
- Name: result.Name,
- Description: result.Description,
- Size: result.Size,
- AvailabilityZone: result.AvailabilityZone,
- // TODO: ProfileId will be removed later.
- ProfileId: result.ProfileId,
- Profile: prf.ToJson(),
- PoolId: result.PoolId,
- SnapshotId: result.SnapshotId,
- Metadata: result.Metadata,
- SnapshotFromCloud: result.SnapshotFromCloud,
- Context: ctx.ToJson(),
- }
- response, err := v.CtrClient.CreateVolume(context.Background(), opt)
- if err != nil {
- log.Error("create volume failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to create volume in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (v *VolumePortal) ListVolumes() {
- if !policy.Authorize(v.Ctx, "volume:list") {
- return
- }
- // Call db api module to handle list volumes request.
- m, err := v.GetParameters()
- if err != nil {
- errMsg := fmt.Sprintf("list volumes failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := db.C.ListVolumesWithFilter(c.GetContext(v.Ctx), m)
- if err != nil {
- errMsg := fmt.Sprintf("list volumes failed: %s", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- v.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (v *VolumePortal) GetVolume() {
- if !policy.Authorize(v.Ctx, "volume:get") {
- return
- }
- id := v.Ctx.Input.Param(":volumeId")
-
- // Call db api module to handle get volume request.
- result, err := db.C.GetVolume(c.GetContext(v.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("volume %s not found: %s", id, err.Error())
- v.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- v.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (v *VolumePortal) UpdateVolume() {
- if !policy.Authorize(v.Ctx, "volume:update") {
- return
- }
- var volume = model.VolumeSpec{
- BaseModel: &model.BaseModel{},
- }
-
- id := v.Ctx.Input.Param(":volumeId")
- if err := json.NewDecoder(v.Ctx.Request.Body).Decode(&volume); err != nil {
- errMsg := fmt.Sprintf("parse volume request body failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- volume.Id = id
- result, err := db.C.UpdateVolume(c.GetContext(v.Ctx), &volume)
- if err != nil {
- errMsg := fmt.Sprintf("update volume failed: %s", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- v.SuccessHandle(StatusOK, body)
-
- return
-}
-
-// ExtendVolume ...
-func (v *VolumePortal) ExtendVolume() {
- if !policy.Authorize(v.Ctx, "volume:extend") {
- return
- }
- ctx := c.GetContext(v.Ctx)
- var extendRequestBody = model.ExtendVolumeSpec{}
-
- if err := json.NewDecoder(v.Ctx.Request.Body).Decode(&extendRequestBody); err != nil {
- errMsg := fmt.Sprintf("parse volume request body failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- id := v.Ctx.Input.Param(":volumeId")
- volume, err := db.C.GetVolume(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("volume %s not found: %s", id, err.Error())
- v.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- prf, err := db.C.GetProfile(ctx, volume.ProfileId)
- if err != nil {
- errMsg := fmt.Sprintf("extend volume failed: %v", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // NOTE:It will update the the status of the volume waiting for expansion in
- // the database to "extending" and return the result immediately.
- result, err := util.ExtendVolumeDBEntry(ctx, id, &extendRequestBody)
- if err != nil {
- errMsg := fmt.Sprintf("extend volume failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- v.SuccessHandle(StatusAccepted, body)
-
- // NOTE:The real volume extension process.
- // Volume extension request is sent to the Dock. Dock will update volume status to "available"
- // after volume extension is completed.
- if err = v.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.ExtendVolumeOpts{
- Id: id,
- Size: extendRequestBody.NewSize,
- Metadata: result.Metadata,
- Context: ctx.ToJson(),
- Profile: prf.ToJson(),
- }
- response, err := v.CtrClient.ExtendVolume(context.Background(), opt)
- if err != nil {
- log.Error("extend volume failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to extend volume in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (v *VolumePortal) DeleteVolume() {
- if !policy.Authorize(v.Ctx, "volume:delete") {
- return
- }
- ctx := c.GetContext(v.Ctx)
-
- var err error
- id := v.Ctx.Input.Param(":volumeId")
- volume, err := db.C.GetVolume(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("volume %s not found: %s", id, err.Error())
- v.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // If profileId or poolId of the volume doesn't exist, it would mean that
- // the volume provisioning operation failed before the create method in
- // storage driver was called, therefore the volume entry should be deleted
- // from db directly.
- if volume.ProfileId == "" || volume.PoolId == "" {
- if err := db.C.DeleteVolume(ctx, volume.Id); err != nil {
- errMsg := fmt.Sprintf("delete volume failed: %v", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
- v.SuccessHandle(StatusAccepted, nil)
- return
- }
-
- prf, err := db.C.GetProfile(ctx, volume.ProfileId)
- if err != nil {
- errMsg := fmt.Sprintf("delete volume failed: %v", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // NOTE:It will update the the status of the volume waiting for deletion in
- // the database to "deleting" and return the result immediately.
- if err = util.DeleteVolumeDBEntry(ctx, volume); err != nil {
- errMsg := fmt.Sprintf("delete volume failed: %v", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- v.SuccessHandle(StatusAccepted, nil)
-
- // NOTE:The real volume deletion process.
- // Volume deletion request is sent to the Dock. Dock will delete volume from driver
- // and database or update volume status to "errorDeleting" if deletion from driver faild.
- if err := v.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.DeleteVolumeOpts{
- Id: volume.Id,
- ProfileId: volume.ProfileId,
- PoolId: volume.PoolId,
- Metadata: volume.Metadata,
- Context: ctx.ToJson(),
- Profile: prf.ToJson(),
- }
- response, err := v.CtrClient.DeleteVolume(context.Background(), opt)
- if err != nil {
- log.Error("delete volume failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to delete volume in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func NewVolumeSnapshotPortal() *VolumeSnapshotPortal {
- return &VolumeSnapshotPortal{
- CtrClient: client.NewClient(),
- }
-}
-
-type VolumeSnapshotPortal struct {
- BasePortal
-
- CtrClient client.Client
-}
-
-func (v *VolumeSnapshotPortal) CreateVolumeSnapshot() {
- if !policy.Authorize(v.Ctx, "snapshot:create") {
- return
- }
- ctx := c.GetContext(v.Ctx)
- var snapshot = model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{},
- }
-
- if err := json.NewDecoder(v.Ctx.Request.Body).Decode(&snapshot); err != nil {
- errMsg := fmt.Sprintf("parse volume snapshot request body failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // If user doesn't specified profile, using profile derived from volume
- if len(snapshot.ProfileId) == 0 {
- log.Warning("User doesn't specified profile id, using profile derived from volume")
- vol, err := db.C.GetVolume(ctx, snapshot.VolumeId)
- if err != nil {
- v.ErrorHandle(model.ErrorBadRequest, err.Error())
- return
- }
- snapshot.ProfileId = vol.ProfileId
- }
- prf, err := db.C.GetProfile(ctx, snapshot.ProfileId)
- if err != nil {
- errMsg := fmt.Sprintf("get profile failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // NOTE:It will create a volume snapshot entry into the database and initialize its status
- // as "creating". It will not wait for the real volume snapshot creation to complete
- // and will return result immediately.
- result, err := util.CreateVolumeSnapshotDBEntry(ctx, &snapshot)
- if err != nil {
- errMsg := fmt.Sprintf("create volume snapshot failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- v.SuccessHandle(StatusAccepted, body)
-
- // NOTE:The real volume snapshot creation process.
- // Volume snapshot creation request is sent to the Dock. Dock will update volume snapshot status to "available"
- // after volume snapshot creation complete.
- if err := v.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.CreateVolumeSnapshotOpts{
- Id: result.Id,
- Name: result.Name,
- Description: result.Description,
- VolumeId: result.VolumeId,
- Size: result.Size,
- Metadata: result.Metadata,
- Context: ctx.ToJson(),
- Profile: prf.ToJson(),
- }
- response, err := v.CtrClient.CreateVolumeSnapshot(context.Background(), opt)
- if err != nil {
- log.Error("create volume snapthot failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to create volume snapshot in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (v *VolumeSnapshotPortal) ListVolumeSnapshots() {
- if !policy.Authorize(v.Ctx, "snapshot:list") {
- return
- }
- m, err := v.GetParameters()
- if err != nil {
- errMsg := fmt.Sprintf("list volume snapshots failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := db.C.ListVolumeSnapshotsWithFilter(c.GetContext(v.Ctx), m)
- if err != nil {
- errMsg := fmt.Sprintf("list volume snapshots failed: %s", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- v.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (v *VolumeSnapshotPortal) GetVolumeSnapshot() {
- if !policy.Authorize(v.Ctx, "snapshot:get") {
- return
- }
- id := v.Ctx.Input.Param(":snapshotId")
-
- result, err := db.C.GetVolumeSnapshot(c.GetContext(v.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("volume snapshot %s not found: %s", id, err.Error())
- v.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- v.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (v *VolumeSnapshotPortal) UpdateVolumeSnapshot() {
- if !policy.Authorize(v.Ctx, "snapshot:update") {
- return
- }
- var snapshot = model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{},
- }
-
- id := v.Ctx.Input.Param(":snapshotId")
-
- if err := json.NewDecoder(v.Ctx.Request.Body).Decode(&snapshot); err != nil {
- errMsg := fmt.Sprintf("parse volume snapshot request body failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- snapshot.Id = id
-
- result, err := db.C.UpdateVolumeSnapshot(c.GetContext(v.Ctx), id, &snapshot)
- if err != nil {
- errMsg := fmt.Sprintf("update volume snapshot failed: %s", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, _ := json.Marshal(result)
- v.SuccessHandle(StatusOK, body)
-
- return
-}
-
-func (v *VolumeSnapshotPortal) DeleteVolumeSnapshot() {
- if !policy.Authorize(v.Ctx, "snapshot:delete") {
- return
- }
- ctx := c.GetContext(v.Ctx)
- id := v.Ctx.Input.Param(":snapshotId")
-
- snapshot, err := db.C.GetVolumeSnapshot(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("volume snapshot %s not found: %s", id, err.Error())
- v.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- prf, err := db.C.GetProfile(ctx, snapshot.ProfileId)
- if err != nil {
- errMsg := fmt.Sprintf("delete snapshot failed: %v", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // NOTE:It will update the the status of the volume snapshot waiting for deletion in
- // the database to "deleting" and return the result immediately.
- err = util.DeleteVolumeSnapshotDBEntry(ctx, snapshot)
- if err != nil {
- errMsg := fmt.Sprintf("delete volume snapshot failed: %v", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- v.SuccessHandle(StatusAccepted, nil)
-
- // NOTE:The real volume snapshot deletion process.
- // Volume snapshot deletion request is sent to the Dock. Dock will delete volume snapshot from driver and
- // database or update its status to "errorDeleting" if volume snapshot deletion from driver failed.
- if err := v.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.DeleteVolumeSnapshotOpts{
- Id: snapshot.Id,
- VolumeId: snapshot.VolumeId,
- Metadata: snapshot.Metadata,
- Context: ctx.ToJson(),
- Profile: prf.ToJson(),
- }
- response, err := v.CtrClient.DeleteVolumeSnapshot(context.Background(), opt)
- if err != nil {
- log.Error("delete volume snapthot failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to delete volume snapshot in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
diff --git a/pkg/api/controllers/volumeGroup.go b/pkg/api/controllers/volumeGroup.go
deleted file mode 100644
index 239bb5045..000000000
--- a/pkg/api/controllers/volumeGroup.go
+++ /dev/null
@@ -1,295 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/api/policy"
- "github.com/opensds/opensds/pkg/api/util"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/controller/client"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- . "github.com/opensds/opensds/pkg/utils/config"
-)
-
-func NewVolumeGroupPortal() *VolumeGroupPortal {
- return &VolumeGroupPortal{
- CtrClient: client.NewClient(),
- }
-}
-
-type VolumeGroupPortal struct {
- BasePortal
-
- CtrClient client.Client
-}
-
-func (v *VolumeGroupPortal) CreateVolumeGroup() {
- if !policy.Authorize(v.Ctx, "volume_group:create") {
- return
- }
- ctx := c.GetContext(v.Ctx)
-
- var volumeGroup = &model.VolumeGroupSpec{
- BaseModel: &model.BaseModel{},
- }
-
- // Unmarshal the request body
- if err := json.NewDecoder(v.Ctx.Request.Body).Decode(&volumeGroup); err != nil {
- errMsg := fmt.Sprintf("parse volume group request body failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
- // NOTE:It will create a volume group entry into the database and initialize its status
- // as "creating". It will not wait for the real volume group process creation to complete
- // and will return result immediately.
- result, err := util.CreateVolumeGroupDBEntry(ctx, volumeGroup)
- if err != nil {
- errMsg := fmt.Sprintf("create volume group failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal volume group created result failed: %s", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
- v.SuccessHandle(StatusAccepted, body)
-
- // NOTE:The real volume group creation process.
- // Volume group creation request is sent to the Dock. Dock will set
- // volume group status to 'available' after volume group creation operation
- // is completed.
- if err = v.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.CreateVolumeGroupOpts{
- Id: result.Id,
- Name: result.Name,
- Description: result.Description,
- AvailabilityZone: result.AvailabilityZone,
- AddVolumes: result.AddVolumes,
- RemoveVolumes: result.RemoveVolumes,
- Context: ctx.ToJson(),
- }
- response, err := v.CtrClient.CreateVolumeGroup(context.Background(), opt)
- if err != nil {
- log.Error("create volume group failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to create volume group in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (v *VolumeGroupPortal) UpdateVolumeGroup() {
- if !policy.Authorize(v.Ctx, "volume_group:update") {
- return
- }
- ctx := c.GetContext(v.Ctx)
- var vg = &model.VolumeGroupSpec{
- BaseModel: &model.BaseModel{},
- }
-
- id := v.Ctx.Input.Param(":groupId")
- if err := json.NewDecoder(v.Ctx.Request.Body).Decode(&vg); err != nil {
- errMsg := fmt.Sprintf("parse volume group request body failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- vg.Id = id
- addVolumes, removeVolumes, err := util.UpdateVolumeGroupDBEntry(ctx, vg)
- if err != nil {
- errMsg := fmt.Sprintf("update volume group failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- // get pool id
- var poolId string
- vgNew, err := db.C.GetVolumeGroup(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("volume group %s not found: %s", id, err.Error())
- v.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
- poolId = vgNew.PoolId
-
- // Marshal the result.
- body, err := json.Marshal(vgNew)
- if err != nil {
- errMsg := fmt.Sprintf("marshal volume group updated result failed: %s", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- v.SuccessHandle(StatusAccepted, body)
-
- // No more values in group need to be updated
- if len(addVolumes) == 0 && len(removeVolumes) == 0 {
- return
- }
-
- // NOTE:The real volume group update process.
- // Volume group update request is sent to the Dock. Dock will set
- // volume group status to 'available' after volume group creation operation
- // is completed.
- if err = v.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.UpdateVolumeGroupOpts{
- Id: id,
- AddVolumes: addVolumes,
- RemoveVolumes: removeVolumes,
- PoolId: poolId,
- Context: ctx.ToJson(),
- }
- response, err := v.CtrClient.UpdateVolumeGroup(context.Background(), opt)
- if err != nil {
- log.Error("update volume group failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to update volume group in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (v *VolumeGroupPortal) DeleteVolumeGroup() {
- if !policy.Authorize(v.Ctx, "volume_group:delete") {
- return
- }
- ctx := c.GetContext(v.Ctx)
-
- id := v.Ctx.Input.Param(":groupId")
- vg, err := db.C.GetVolumeGroup(ctx, id)
- if err != nil {
- errMsg := fmt.Sprintf("volume group %s not found: %s", id, err.Error())
- v.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- if err = util.DeleteVolumeGroupDBEntry(c.GetContext(v.Ctx), id); err != nil {
- errMsg := fmt.Sprintf("delete volume group failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- v.SuccessHandle(StatusAccepted, nil)
-
- // NOTE:The real volume group deletion process.
- // Volume group deletion request is sent to the Dock. Dock will remove
- // volume group record after volume group deletion operation is completed.
- if err = v.CtrClient.Connect(CONF.OsdsLet.ApiEndpoint); err != nil {
- log.Error("when connecting controller client:", err)
- return
- }
-
- opt := &pb.DeleteVolumeGroupOpts{
- Id: id,
- PoolId: vg.PoolId,
- Context: ctx.ToJson(),
- }
- response, err := v.CtrClient.DeleteVolumeGroup(context.Background(), opt)
- if err != nil {
- log.Error("delete volume group failed in controller service:", err)
- return
- }
- if errorMsg := response.GetError(); errorMsg != nil {
- log.Errorf("failed to delete volume group in controller, code: %v, message: %v",
- errorMsg.GetCode(), errorMsg.GetDescription())
- return
- }
-
- return
-}
-
-func (v *VolumeGroupPortal) GetVolumeGroup() {
- if !policy.Authorize(v.Ctx, "volume_group:get") {
- return
- }
-
- id := v.Ctx.Input.Param(":groupId")
- // Call db api module to handle get volume request.
- result, err := db.C.GetVolumeGroup(c.GetContext(v.Ctx), id)
- if err != nil {
- errMsg := fmt.Sprintf("volume group %s not found: %s", id, err.Error())
- v.ErrorHandle(model.ErrorNotFound, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal volume group showed result failed: %s", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- v.SuccessHandle(StatusOK, body)
- return
-}
-
-func (v *VolumeGroupPortal) ListVolumeGroups() {
- if !policy.Authorize(v.Ctx, "volume_group:get") {
- return
- }
-
- m, err := v.GetParameters()
- if err != nil {
- errMsg := fmt.Sprintf("list volume group parameters failed: %s", err.Error())
- v.ErrorHandle(model.ErrorBadRequest, errMsg)
- return
- }
-
- result, err := db.C.ListVolumeGroupsWithFilter(c.GetContext(v.Ctx), m)
- if err != nil {
- errMsg := fmt.Sprintf("list volume groups failed: %s", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- // Marshal the result.
- body, err := json.Marshal(result)
- if err != nil {
- errMsg := fmt.Sprintf("marshal volume groups listed result failed: %s", err.Error())
- v.ErrorHandle(model.ErrorInternalServer, errMsg)
- return
- }
-
- v.SuccessHandle(StatusOK, body)
- return
-}
diff --git a/pkg/api/controllers/volumeGroup_test.go b/pkg/api/controllers/volumeGroup_test.go
deleted file mode 100644
index e8f9b39ee..000000000
--- a/pkg/api/controllers/volumeGroup_test.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "encoding/json"
- "errors"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/astaxie/beego"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/testutils/collection"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
-)
-
-func init() {
- beego.Router("/v1beta/block/volumeGroups", &VolumeGroupPortal{}, "post:CreateVolumeGroup;get:ListVolumeGroups")
- beego.Router("/v1beta/block/volumeGroups/:groupId", &VolumeGroupPortal{}, "put:UpdateVolumeGroup;get:GetVolumeGroup;delete:DeleteVolumeGroup")
-}
-
-func TestListVolumeGroups(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- var sampleVGs = []*model.VolumeGroupSpec{&SampleVolumeGroups[0]}
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListVolumeGroupsWithFilter", c.NewAdminContext(), m).Return(sampleVGs, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/volumeGroups?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output []*model.VolumeGroupSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, sampleVGs)
- })
-
- t.Run("Should return 500 if list volume groups with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListVolumeGroupsWithFilter", c.NewAdminContext(), m).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/volumeGroups?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestGetVolumeGroup(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolumeGroup", c.NewAdminContext(), "3769855c-a102-11e7-b772-17b880d2f555").Return(&SampleVolumeGroups[0], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/volumeGroups/3769855c-a102-11e7-b772-17b880d2f555", nil)
- w := httptest.NewRecorder()
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.VolumeGroupSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &SampleVolumeGroups[0])
- })
-
- t.Run("Should return 404 if get volume group with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolumeGroup", c.NewAdminContext(), "3769855c-a102-11e7-b772-17b880d2f555").Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/volumeGroups/3769855c-a102-11e7-b772-17b880d2f555", nil)
- w := httptest.NewRecorder()
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
diff --git a/pkg/api/controllers/volume_test.go b/pkg/api/controllers/volume_test.go
deleted file mode 100755
index 5481a1ded..000000000
--- a/pkg/api/controllers/volume_test.go
+++ /dev/null
@@ -1,436 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controllers
-
-import (
- "bytes"
- ctx "context"
- "encoding/json"
- "errors"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- . "github.com/opensds/opensds/testutils/collection"
- ctrtest "github.com/opensds/opensds/testutils/controller/testing"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
-)
-
-////////////////////////////////////////////////////////////////////////////////
-// Prepare for mock server //
-////////////////////////////////////////////////////////////////////////////////
-
-func init() {
- beego.Router("/v1beta/block/volumes", NewFakeVolumePortal(),
- "post:CreateVolume;get:ListVolumes")
- beego.Router("/v1beta/block/volumes/:volumeId", NewFakeVolumePortal(),
- "get:GetVolume;put:UpdateVolume;delete:DeleteVolume")
- beego.Router("/v1beta/block/volumes/:volumeId/resize", NewFakeVolumePortal(),
- "post:ExtendVolume")
-
- beego.Router("/v1beta/block/snapshots", &VolumeSnapshotPortal{},
- "post:CreateVolumeSnapshot;get:ListVolumeSnapshots")
- beego.Router("/v1beta/block/snapshots/:snapshotId", &VolumeSnapshotPortal{},
- "get:GetVolumeSnapshot;put:UpdateVolumeSnapshot;delete:DeleteVolumeSnapshot")
-}
-
-func NewFakeVolumePortal() *VolumePortal {
- mockClient := new(ctrtest.Client)
-
- mockClient.On("Connect", "localhost:50049").Return(nil)
- mockClient.On("Close").Return(nil)
- mockClient.On("CreateVolume", ctx.Background(), &pb.CreateVolumeOpts{
- Context: c.NewAdminContext().ToJson(),
- }).Return(&pb.GenericResponse{}, nil)
- mockClient.On("ExtendVolume", ctx.Background(), &pb.ExtendVolumeOpts{
- Id: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- Size: int64(20),
- Context: c.NewAdminContext().ToJson(),
- Profile: SampleProfiles[0].ToJson(),
- }).Return(&pb.GenericResponse{}, nil)
- mockClient.On("DeleteVolume", ctx.Background(), &pb.DeleteVolumeOpts{
- Context: c.NewAdminContext().ToJson(),
- }).Return(&pb.GenericResponse{}, nil)
-
- return &VolumePortal{
- CtrClient: mockClient,
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Tests for volume //
-////////////////////////////////////////////////////////////////////////////////
-
-var (
- fakeVolume = &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: "f4a5e666-c669-4c64-a2a1-8f9ecd560c78",
- CreatedAt: "2017-10-24T16:21:32",
- },
- Name: "fake Vol",
- Description: "fake Vol",
- Size: 99,
- AvailabilityZone: "unknown",
- Status: "available",
- PoolId: "831fa5fb-17cf-4410-bec6-1f4b06208eef",
- ProfileId: "d3a109ff-3e51-4625-9054-32604c79fa90",
- }
- fakeVolumes = []*model.VolumeSpec{fakeVolume}
-)
-
-func TestListVolumes(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- var sampleVolumes = []*model.VolumeSpec{&SampleVolumes[0], &SampleVolumes[1]}
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListVolumesWithFilter", c.NewAdminContext(), m).Return(sampleVolumes, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/volumes?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- var output []*model.VolumeSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, sampleVolumes)
- })
-
- t.Run("Should return 500 if list volume with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListVolumesWithFilter", c.NewAdminContext(), m).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/volumes?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestGetVolume(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolume", c.NewAdminContext(), "bd5b12a8-a101-11e7-941e-d77981b584d8").Return(&SampleVolumes[0], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/volumes/bd5b12a8-a101-11e7-941e-d77981b584d8", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- var output model.VolumeSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, &output, &SampleVolumes[0])
- })
-
- t.Run("Should return 404 if get volume replication with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolume", c.NewAdminContext(), "bd5b12a8-a101-11e7-941e-d77981b584d8").Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/volumes/bd5b12a8-a101-11e7-941e-d77981b584d8", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
-
-func TestUpdateVolume(t *testing.T) {
- var jsonStr = []byte(`{
- "id": "bd5b12a8-a101-11e7-941e-d77981b584d8",
- "name":"fake Vol",
- "description":"fake Vol"
- }`)
- var expectedJson = []byte(`{
- "id": "bd5b12a8-a101-11e7-941e-d77981b584d8",
- "name": "fake Vol",
- "description": "fake Vol",
- "size": 1,
- "status": "available",
- "poolId": "084bf71e-a102-11e7-88a8-e31fe6d52248",
- "profileId": "1106b972-66ef-11e7-b172-db03f3689c9c"
- }`)
- var expected model.VolumeSpec
- json.Unmarshal(expectedJson, &expected)
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- volume := model.VolumeSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&volume)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateVolume", c.NewAdminContext(), &volume).Return(&expected, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/block/volumes/bd5b12a8-a101-11e7-941e-d77981b584d8", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.VolumeSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &expected)
- })
-
- t.Run("Should return 500 if update volume with bad request", func(t *testing.T) {
- volume := model.VolumeSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&volume)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateVolume", c.NewAdminContext(), &volume).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/block/volumes/bd5b12a8-a101-11e7-941e-d77981b584d8", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestExtendVolume(t *testing.T) {
- var jsonStr = []byte(`{
- "newSize":20
- }`)
- var expectedJson = []byte(`{
- "id": "bd5b12a8-a101-11e7-941e-d77981b584d8",
- "name": "sample-volume",
- "description": "This is a sample volume for testing",
- "size": 1,
- "availabilityZone": "default",
- "status": "extending",
- "poolId": "084bf71e-a102-11e7-88a8-e31fe6d52248",
- "profileId": "1106b972-66ef-11e7-b172-db03f3689c9c"
- }`)
- var expected model.VolumeSpec
- json.Unmarshal(expectedJson, &expected)
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolume", c.NewAdminContext(), "bd5b12a8-a101-11e7-941e-d77981b584d8").Return(&SampleVolumes[0], nil)
- mockClient.On("ExtendVolume", c.NewAdminContext(), &expected).Return(&expected, nil)
- mockClient.On("GetProfile", c.NewAdminContext(), SampleReplications[0].ProfileId).Return(&SampleProfiles[0], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("POST", "/v1beta/block/volumes/bd5b12a8-a101-11e7-941e-d77981b584d8/resize", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.VolumeSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 202)
- assertTestResult(t, &output, &expected)
- })
-
- t.Run("Should return 400 if extend volume with bad request", func(t *testing.T) {
- jsonStr = []byte(`{
- "newSize": 1
- }`)
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolume", c.NewAdminContext(), "bd5b12a8-a101-11e7-941e-d77981b584d8").Return(&SampleVolumes[0], nil)
- mockClient.On("ExtendVolume", c.NewAdminContext(), &expected).Return(&expected, nil)
- mockClient.On("GetProfile", c.NewAdminContext(), SampleReplications[0].ProfileId).Return(&SampleProfiles[0], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("POST", "/v1beta/block/volumes/bd5b12a8-a101-11e7-941e-d77981b584d8/resize", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 400)
- })
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Tests for volume snapshot //
-////////////////////////////////////////////////////////////////////////////////
-
-func TestListVolumeSnapshots(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- var sampleSnapshots = []*model.VolumeSnapshotSpec{&SampleSnapshots[0], &SampleSnapshots[1]}
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListVolumeSnapshotsWithFilter", c.NewAdminContext(), m).Return(sampleSnapshots, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/snapshots?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
-
- var output []*model.VolumeSnapshotSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, output, sampleSnapshots)
- })
-
- t.Run("Should return 500 if list volume snapshots with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- m := map[string][]string{
- "offset": {"0"},
- "limit": {"1"},
- "sortDir": {"asc"},
- "sortKey": {"name"},
- }
- mockClient.On("ListVolumeSnapshotsWithFilter", c.NewAdminContext(), m).Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/snapshots?offset=0&limit=1&sortDir=asc&sortKey=name", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
-
-func TestGetVolumeSnapshot(t *testing.T) {
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolumeSnapshot", c.NewAdminContext(), "3769855c-a102-11e7-b772-17b880d2f537").Return(&SampleSnapshots[0], nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/snapshots/3769855c-a102-11e7-b772-17b880d2f537", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.VolumeSnapshotSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &SampleSnapshots[0])
- })
-
- t.Run("Should return 404 if get volume group with bad request", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolumeSnapshot", c.NewAdminContext(), "3769855c-a102-11e7-b772-17b880d2f537").Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("GET", "/v1beta/block/snapshots/3769855c-a102-11e7-b772-17b880d2f537", nil)
- w := httptest.NewRecorder()
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 404)
- })
-}
-
-func TestUpdateVolumeSnapshot(t *testing.T) {
- var jsonStr = []byte(`{
- "id": "3769855c-a102-11e7-b772-17b880d2f537",
- "name":"fake snapshot",
- "description":"fake snapshot"
- }`)
- var expectedJson = []byte(`{
- "id": "3769855c-a102-11e7-b772-17b880d2f537",
- "name": "fake snapshot",
- "description": "fake snapshot",
- "size": 1,
- "status": "available",
- "volumeId": "bd5b12a8-a101-11e7-941e-d77981b584d8"
- }`)
- var expected model.VolumeSnapshotSpec
- json.Unmarshal(expectedJson, &expected)
-
- t.Run("Should return 200 if everything works well", func(t *testing.T) {
- snapshot := model.VolumeSnapshotSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&snapshot)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateVolumeSnapshot", c.NewAdminContext(), snapshot.Id, &snapshot).
- Return(&expected, nil)
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/block/snapshots/3769855c-a102-11e7-b772-17b880d2f537", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- var output model.VolumeSnapshotSpec
- json.Unmarshal(w.Body.Bytes(), &output)
- assertTestResult(t, w.Code, 200)
- assertTestResult(t, &output, &expected)
- })
-
- t.Run("Should return 500 if update volume snapshot with bad request", func(t *testing.T) {
- snapshot := model.VolumeSnapshotSpec{BaseModel: &model.BaseModel{}}
- json.NewDecoder(bytes.NewBuffer(jsonStr)).Decode(&snapshot)
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateVolumeSnapshot", c.NewAdminContext(), snapshot.Id, &snapshot).
- Return(nil, errors.New("db error"))
- db.C = mockClient
-
- r, _ := http.NewRequest("PUT", "/v1beta/block/snapshots/3769855c-a102-11e7-b772-17b880d2f537", bytes.NewBuffer(jsonStr))
- w := httptest.NewRecorder()
- r.Header.Set("Content-Type", "application/JSON")
- beego.InsertFilter("*", beego.BeforeExec, func(httpCtx *context.Context) {
- httpCtx.Input.SetData("context", c.NewAdminContext())
- })
- beego.BeeApp.Handlers.ServeHTTP(w, r)
- assertTestResult(t, w.Code, 500)
- })
-}
diff --git a/pkg/api/filter/accesslog/accesslog.go b/pkg/api/filter/accesslog/accesslog.go
deleted file mode 100644
index 50902d224..000000000
--- a/pkg/api/filter/accesslog/accesslog.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package accesslog
-
-import (
- "github.com/astaxie/beego"
- bctx "github.com/astaxie/beego/context"
- "github.com/golang/glog"
-)
-
-func Factory() beego.FilterFunc {
- return func(httpCtx *bctx.Context) {
- r := httpCtx.Request
- glog.Infof("\033[32m[D] %s -- %s %s\033[0m\n", r.RemoteAddr, r.Method,
- r.URL)
- }
-}
diff --git a/pkg/api/filter/auth/auth.go b/pkg/api/filter/auth/auth.go
deleted file mode 100644
index f01c74b92..000000000
--- a/pkg/api/filter/auth/auth.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
- log "github.com/golang/glog"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/utils/config"
- "github.com/opensds/opensds/pkg/utils/constants"
-)
-
-type AuthBase interface {
- Filter(ctx *context.Context)
-}
-
-func NewNoAuth() AuthBase {
- return &NoAuth{}
-}
-
-type NoAuth struct{}
-
-func (auth *NoAuth) Filter(httpCtx *context.Context) {
- ctx := c.GetContext(httpCtx)
- ctx.TenantId = httpCtx.Input.Param(":tenantId")
- // In noauth case, only the default id is treated as admin role.
- if ctx.TenantId == constants.DefaultTenantId {
- ctx.IsAdmin = true
- }
- httpCtx.Input.SetData("context", ctx)
-}
-
-func Factory() beego.FilterFunc {
- var auth AuthBase
- log.Infof(config.CONF.AuthStrategy)
- switch config.CONF.AuthStrategy {
- case "keystone":
- auth = NewKeystone()
- case "noauth":
- auth = NewNoAuth()
- default:
- auth = NewNoAuth()
- }
- log.Info(auth)
- return auth.Filter
-}
diff --git a/pkg/api/filter/auth/keystone.go b/pkg/api/filter/auth/keystone.go
deleted file mode 100644
index 908cf6dcd..000000000
--- a/pkg/api/filter/auth/keystone.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-// Keystone authentication middleware, only support keystone v3.
-
-package auth
-
-import (
- "net/http"
- "strings"
- "time"
-
- bctx "github.com/astaxie/beego/context"
- log "github.com/golang/glog"
- "github.com/gophercloud/gophercloud"
- "github.com/gophercloud/gophercloud/openstack"
- "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
- "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/opensds/opensds/pkg/utils/config"
- "github.com/opensds/opensds/pkg/utils/constants"
- "github.com/opensds/opensds/pkg/utils/pwd"
-)
-
-func NewKeystone() AuthBase {
- k := &Keystone{}
- if err := k.SetUp(); err != nil {
- // If auth set up failed, raise panic.
- panic(err)
- }
- return k
-}
-
-type Keystone struct {
- identity *gophercloud.ServiceClient
-}
-
-func (k *Keystone) SetUp() error {
- c := config.CONF.KeystoneAuthToken
-
- var pwdCiphertext = c.Password
-
- if c.EnableEncrypted {
- // Decrypte the password
- pwdTool := pwd.NewPwdEncrypter(c.PwdEncrypter)
- password, err := pwdTool.Decrypter(pwdCiphertext)
- if err != nil {
- return err
- }
- pwdCiphertext = password
- }
-
- opts := gophercloud.AuthOptions{
- IdentityEndpoint: c.AuthUrl,
- DomainName: c.UserDomainName,
- Username: c.Username,
- Password: pwdCiphertext,
- TenantName: c.ProjectName,
- }
- provider, err := openstack.AuthenticatedClient(opts)
- if err != nil {
- log.Error("When get auth client:", err)
- return err
- }
- // Only support keystone v3
- k.identity, err = openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{})
- if err != nil {
- log.Error("When get identity session:", err)
- return err
- }
- log.V(4).Infof("Service Token Info: %s", provider.TokenID)
- return nil
-}
-
-func (k *Keystone) setPolicyContext(ctx *bctx.Context, r tokens.GetResult) error {
- roles, err := r.ExtractRoles()
- if err != nil {
- return context.HttpError(ctx, http.StatusUnauthorized, "extract roles failed,%v", err)
- }
-
- var roleNames []string
- for _, role := range roles {
- roleNames = append(roleNames, role.Name)
- }
-
- project, err := r.ExtractProject()
- if err != nil {
- return context.HttpError(ctx, http.StatusUnauthorized, "extract project failed,%v", err)
- }
-
- user, err := r.ExtractUser()
- if err != nil {
- return context.HttpError(ctx, http.StatusUnauthorized, "extract user failed,%v", err)
- }
-
- param := map[string]interface{}{
- "TenantId": project.ID,
- "Roles": roleNames,
- "UserId": user.ID,
- "IsAdminProject": strings.ToLower(project.Name) == "admin",
- }
- context.UpdateContext(ctx, param)
-
- return nil
-}
-
-func (k *Keystone) validateToken(ctx *bctx.Context, token string) error {
- if token == "" {
- return context.HttpError(ctx, http.StatusUnauthorized, "token not found in header")
- }
-
- var r tokens.GetResult
- // The service token may be expired or revoked, so retry to get new token.
- err := utils.Retry(2, "verify token", false, func(retryIdx int, lastErr error) error {
- if retryIdx > 0 {
- // Fixme: Is there any better method ?
- if lastErr.Error() == "Authentication failed" {
- k.SetUp()
- } else {
- return lastErr
- }
- }
- r = tokens.Get(k.identity, token)
- return r.Err
- })
- if err != nil {
- return context.HttpError(ctx, http.StatusUnauthorized, "get token failed,%v", r.Err)
- }
-
- t, err := r.ExtractToken()
- if err != nil {
- return context.HttpError(ctx, http.StatusUnauthorized, "extract token failed,%v", err)
-
- }
- log.V(8).Infof("token: %v", t)
-
- if time.Now().After(t.ExpiresAt) {
- return context.HttpError(ctx, http.StatusUnauthorized,
- "token has expired, expire time %v", t.ExpiresAt)
- }
- return k.setPolicyContext(ctx, r)
-}
-
-func (k *Keystone) Filter(ctx *bctx.Context) {
- // Strip the spaces around the token
- token := strings.TrimSpace(ctx.Input.Header(constants.AuthTokenHeader))
- k.validateToken(ctx, token)
-}
diff --git a/pkg/api/filter/context/context.go b/pkg/api/filter/context/context.go
deleted file mode 100644
index d7604b51c..000000000
--- a/pkg/api/filter/context/context.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package context
-
-import (
- "github.com/astaxie/beego"
- bctx "github.com/astaxie/beego/context"
- c "github.com/opensds/opensds/pkg/context"
-)
-
-func Factory() beego.FilterFunc {
- return func(httpCtx *bctx.Context) {
- c.UpdateContext(httpCtx, map[string]interface{}{
- "Uri": httpCtx.Input.URI(),
- })
- }
-}
diff --git a/pkg/api/filter/validation/validation.go b/pkg/api/filter/validation/validation.go
deleted file mode 100644
index 5d4e57f17..000000000
--- a/pkg/api/filter/validation/validation.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package validation
-
-import (
- "context"
- "fmt"
- "net/http"
- "strings"
-
- "github.com/astaxie/beego"
- bctx "github.com/astaxie/beego/context"
- "github.com/getkin/kin-openapi/openapi3"
- "github.com/getkin/kin-openapi/openapi3filter"
- "github.com/golang/glog"
- myctx "github.com/opensds/opensds/pkg/context"
-)
-
-// Factory returns a fiter function of api request
-func Factory(filename string) beego.FilterFunc {
- swagger, err := openapi3.NewSwaggerLoader().LoadSwaggerFromFile(filename)
- if err != nil {
- glog.Warningf("error loading %s api swagger file: %s", filename, err)
- return func(httpCtx *bctx.Context) {}
- }
-
- // Server is not required for finding route
- swagger.Servers = nil
- router := openapi3filter.NewRouter().WithSwagger(swagger)
- return func(httpCtx *bctx.Context) {
- req := httpCtx.Request
- route, pathParams, err := router.FindRoute(req.Method, req.URL)
- if err != nil {
- glog.Errorf("failed to find route from swagger: %s", err)
- myctx.HttpError(httpCtx, http.StatusBadRequest, "failed to find route %s:%s from swagger: %s", req.Method, req.URL, err)
- }
-
- requestValidationInput := &openapi3filter.RequestValidationInput{
- Request: req,
- PathParams: pathParams,
- Route: route,
- }
- if err := openapi3filter.ValidateRequest(context.Background(), requestValidationInput); err != nil {
- errMsg := ""
- switch e := err.(type) {
- case *openapi3filter.RequestError:
- // Retrieve first line of err message
- errMsg = strings.Split(e.Error(), "\n")[0]
- default:
- errMsg = fmt.Sprintf("%s", err)
- }
- glog.Errorf("invalid request: %s", errMsg)
- myctx.HttpError(httpCtx, http.StatusBadRequest, "%s", errMsg)
- }
- }
-}
diff --git a/pkg/api/main.go b/pkg/api/main.go
deleted file mode 100755
index bcc3dd3ec..000000000
--- a/pkg/api/main.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS northbound REST service.
-*/
-
-package api
-
-import (
- "crypto/tls"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/astaxie/beego"
- "github.com/opensds/opensds/pkg/api/filter/accesslog"
- "github.com/opensds/opensds/pkg/api/filter/auth"
- "github.com/opensds/opensds/pkg/api/filter/context"
- "github.com/opensds/opensds/pkg/api/filter/validation"
- cfg "github.com/opensds/opensds/pkg/utils/config"
- "github.com/opensds/opensds/pkg/utils/constants"
-
- // Load the API routers
- _ "github.com/opensds/opensds/pkg/api/routers"
-)
-
-const (
- AddressIdx = iota
- PortIdx
-)
-
-func Run(apiServerCfg cfg.OsdsApiServer) {
-
- if apiServerCfg.HTTPSEnabled {
- if apiServerCfg.BeegoHTTPSCertFile == "" || apiServerCfg.BeegoHTTPSKeyFile == "" {
- fmt.Println("If https is enabled in hotpot, please ensure key file and cert file of the hotpot are not empty.")
- return
- }
- // beego https config
- beego.BConfig.Listen.EnableHTTP = false
- beego.BConfig.Listen.EnableHTTPS = true
- strs := strings.Split(apiServerCfg.ApiEndpoint, ":")
- beego.BConfig.Listen.HTTPSAddr = strs[AddressIdx]
- beego.BConfig.Listen.HTTPSPort, _ = strconv.Atoi(strs[PortIdx])
- beego.BConfig.Listen.HTTPSCertFile = apiServerCfg.BeegoHTTPSCertFile
- beego.BConfig.Listen.HTTPSKeyFile = apiServerCfg.BeegoHTTPSKeyFile
- tlsConfig := &tls.Config{
- MinVersion: tls.VersionTLS12,
- CipherSuites: []uint16{
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- },
- }
-
- beego.BeeApp.Server.TLSConfig = tlsConfig
- }
-
- beego.BConfig.Listen.ServerTimeOut = apiServerCfg.BeegoServerTimeOut
- beego.BConfig.CopyRequestBody = true
- beego.BConfig.EnableErrorsShow = false
- beego.BConfig.EnableErrorsRender = false
- beego.BConfig.WebConfig.AutoRender = false
- // insert some auth rules
- pattern := fmt.Sprintf("/%s/*", constants.APIVersion)
- beego.InsertFilter(pattern, beego.BeforeExec, context.Factory())
- beego.InsertFilter(pattern, beego.BeforeExec, auth.Factory())
- beego.InsertFilter("*", beego.BeforeExec, accesslog.Factory())
- beego.InsertFilter("*", beego.BeforeExec, validation.Factory(apiServerCfg.ApiSpecPath))
-
- // start service
- beego.Run(apiServerCfg.ApiEndpoint)
-}
diff --git a/pkg/api/policy/checks.go b/pkg/api/policy/checks.go
deleted file mode 100644
index e4b185f40..000000000
--- a/pkg/api/policy/checks.go
+++ /dev/null
@@ -1,345 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package policy
-
-import (
- "errors"
- "fmt"
- "reflect"
- "regexp"
- "strings"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/utils"
-)
-
-func init() {
- registerAll()
-}
-
-type NewCheckFunc func(kind string, match string) BaseCheck
-
-var registeredChecks map[string]NewCheckFunc
-
-func register(name string, f NewCheckFunc) {
- registeredChecks[name] = f
-}
-
-func registerAll() {
- if registeredChecks == nil {
- registeredChecks = make(map[string]NewCheckFunc)
- }
- register("rule", NewRuleCheck)
- register("role", NewRoleCheck)
- register("generic", NewGenericCheck)
-}
-
-type BaseCheck interface {
- String() string
- Exec(target map[string]string, cred map[string]interface{}, enforcer Enforcer, currentRule string) bool
-}
-
-func check(rule BaseCheck,
- target map[string]string,
- cred map[string]interface{},
- enforcer Enforcer,
- currentRule string) bool {
- ret := rule.Exec(target, cred, enforcer, currentRule)
- log.V(8).Infof("check rules:%s -- %v", rule, ret)
- return ret
-}
-
-func NewFalseCheck() BaseCheck {
- return &FalseCheck{}
-}
-
-type FalseCheck struct{}
-
-func (this *FalseCheck) String() string {
- return "!"
-}
-
-func (this *FalseCheck) Exec(target map[string]string,
- cred map[string]interface{},
- enforcer Enforcer,
- currentRule string) bool {
- return false
-}
-
-func NewTrueCheck() BaseCheck {
- return &TrueCheck{}
-}
-
-type TrueCheck struct {
- rule string
-}
-
-func (this *TrueCheck) String() string {
- return "@"
-}
-
-func (this *TrueCheck) Exec(target map[string]string,
- cred map[string]interface{},
- enforcer Enforcer,
- currentRule string) bool {
- return true
-}
-
-func NewNotCheck(check BaseCheck) *NotCheck {
- return &NotCheck{check}
-}
-
-type NotCheck struct {
- rule BaseCheck
-}
-
-func (this *NotCheck) String() string {
- return fmt.Sprintf("not %s", this.rule)
-}
-
-func (this *NotCheck) Exec(target map[string]string,
- cred map[string]interface{},
- enforcer Enforcer,
- currentRule string) bool {
- return !check(this.rule, target, cred, enforcer, currentRule)
-}
-
-func NewAndCheck(check1 BaseCheck, check2 BaseCheck) *AndCheck {
- ac := &AndCheck{}
- ac.AddCheck(check1)
- ac.AddCheck(check2)
- return ac
-}
-
-type AndCheck struct {
- rules []BaseCheck
-}
-
-func (this *AndCheck) String() string {
- var r []string
- for _, rule := range this.rules {
- r = append(r, rule.String())
- }
- return fmt.Sprintf("(%s)", strings.Join(r, " and "))
-}
-
-func (this *AndCheck) Exec(target map[string]string,
- cred map[string]interface{},
- enforcer Enforcer,
- currentRule string) bool {
- for _, rule := range this.rules {
- if !check(rule, target, cred, enforcer, currentRule) {
- return false
- }
- }
- return true
-}
-
-func (this *AndCheck) AddCheck(rule BaseCheck) *AndCheck {
- this.rules = append(this.rules, rule)
- return this
-}
-
-func NewOrCheck(check1 BaseCheck, check2 BaseCheck) *OrCheck {
- oc := &OrCheck{}
- oc.AddCheck(check1)
- oc.AddCheck(check2)
- return oc
-}
-
-type OrCheck struct {
- rules []BaseCheck
-}
-
-func (this *OrCheck) String() string {
- var r []string
- for _, rule := range this.rules {
- r = append(r, rule.String())
- }
- return fmt.Sprintf("(%s)", strings.Join(r, " or "))
-}
-
-func (this *OrCheck) Exec(target map[string]string,
- cred map[string]interface{},
- enforcer Enforcer,
- currentRule string) bool {
- for _, rule := range this.rules {
- if check(rule, target, cred, enforcer, currentRule) {
- return true
- }
- }
- return false
-}
-
-func (this *OrCheck) AddCheck(rule BaseCheck) *OrCheck {
- this.rules = append(this.rules, rule)
- return this
-}
-
-func (this *OrCheck) PopCheck() (*OrCheck, BaseCheck) {
- x := this.rules[len(this.rules)-1]
- this.rules = this.rules[:len(this.rules)-1]
- return this, x
-}
-
-func NewRuleCheck(kind string, match string) BaseCheck {
- return &RuleCheck{kind, match}
-}
-
-type RuleCheck struct {
- kind string
- match string
-}
-
-func (this *RuleCheck) String() string {
- return fmt.Sprintf("%s:%s", this.kind, this.match)
-}
-
-func (this *RuleCheck) Exec(target map[string]string,
- cred map[string]interface{},
- enforcer Enforcer,
- currentRule string) bool {
- if len(enforcer.Rules) == 0 {
- return false
- }
- return check(enforcer.Rules[this.match], target, cred, enforcer, currentRule)
-}
-
-func keyWorkFormatter(target map[string]string, match string) (string, error) {
- reg := regexp.MustCompile(`%([[:graph:]]+)s`)
- if ms := reg.FindAllString(match, -1); len(ms) == 1 {
- s := ms[0][2 : len(ms[0])-2]
- for key, val := range target {
- if s == key {
- return val, nil
- }
- }
- return "", fmt.Errorf("target key doesn`t match")
- }
- return match, nil
-}
-
-func NewRoleCheck(kind string, match string) BaseCheck {
- return &RoleCheck{kind, match}
-}
-
-type RoleCheck struct {
- kind string
- match string
-}
-
-func (r *RoleCheck) String() string {
- return fmt.Sprintf("%s:%s", r.kind, r.match)
-}
-
-func (r *RoleCheck) Exec(target map[string]string,
- cred map[string]interface{},
- enforcer Enforcer,
- currentRule string) bool {
- match, err := keyWorkFormatter(target, r.match)
- if err != nil {
- return false
- }
- if roles, ok := cred["roles"]; ok {
- for _, role := range roles.([]string) {
- if strings.ToLower(match) == strings.ToLower(role) {
- return true
- }
- }
- }
- return false
-}
-
-func NewGenericCheck(kind string, match string) BaseCheck {
- return &GenericCheck{kind, match}
-}
-
-type GenericCheck struct {
- kind string
- match string
-}
-
-func (g *GenericCheck) String() string {
- return fmt.Sprintf("%s:%s", g.kind, g.match)
-}
-
-func (g *GenericCheck) simpleLiteral(expr string) (string, error) {
- s := fmt.Sprintf("%c%c", expr[0], expr[len(expr)-1])
- if len(expr) >= 2 && (s == "\"\"" || s == "''") {
- return expr[1 : len(expr)-1], nil
- }
- if utils.Contained(strings.ToLower(expr), []string{"true", "false"}) {
- return strings.ToLower(expr), nil
- }
- return "", errors.New("Not support right now")
-}
-
-func (g *GenericCheck) findInMap(testVal interface{}, pathSegs []string, match string) bool {
- if len(pathSegs) == 0 {
- switch testVal.(type) {
- case string:
- return strings.ToLower(match) == strings.ToLower(testVal.(string))
- case bool:
- return strings.ToLower(match) == fmt.Sprint(testVal.(bool))
- default:
- return false
- }
- }
- key, pathSegs := pathSegs[0], pathSegs[1:]
- if val, ok := testVal.(map[string]interface{}); ok {
- testVal = val[key]
- } else {
- return false
- }
- if testVal == nil {
- return false
- }
-
- if reflect.TypeOf(testVal).Kind() == reflect.Slice {
- if vList, ok := testVal.([]interface{}); ok {
- for _, val := range vList {
- if g.findInMap(val, pathSegs, match) {
- return true
- }
- }
- } else {
- for _, val := range testVal.([]string) {
- if g.findInMap(val, pathSegs, match) {
- return true
- }
- }
- }
- return false
- }
- return g.findInMap(testVal, pathSegs, match)
-}
-
-func (g *GenericCheck) Exec(target map[string]string,
- cred map[string]interface{},
- enforcer Enforcer,
- currentRule string) bool {
- match, err := keyWorkFormatter(target, strings.ToLower(g.match))
- if err != nil {
- return false
- }
-
- if testValue, err := g.simpleLiteral(g.kind); err == nil {
- return strings.ToLower(match) == testValue
- }
- if len(cred) == 0 {
- return false
- }
- return g.findInMap(cred, strings.Split(g.kind, "."), match)
-}
diff --git a/pkg/api/policy/checks_test.go b/pkg/api/policy/checks_test.go
deleted file mode 100644
index e3e8cd8e8..000000000
--- a/pkg/api/policy/checks_test.go
+++ /dev/null
@@ -1,383 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package policy
-
-import (
- "fmt"
- "reflect"
- "testing"
-)
-
-func newBoolCheck(result bool) BaseCheck {
- return &boolCheck{false, result}
-}
-
-type boolCheck struct {
- called bool
- result bool
-}
-
-func (b *boolCheck) String() string {
- return fmt.Sprint(b.result)
-}
-
-func (b *boolCheck) Exec(target map[string]string, cred map[string]interface{}, enforcer Enforcer, currentRule string) bool {
- b.called = true
- return b.result
-}
-
-func TestRuleCheck(t *testing.T) {
- enforcer := Enforcer{}
- check := NewRuleCheck("rule", "spam")
- target := map[string]string{"target": "fake"}
- if check.Exec(target, make(map[string]interface{}), enforcer, "") {
- t.Error("RuleCheck missing rule test failed")
- }
-
- enforcer.Rules = make(map[string]BaseCheck)
- enforcer.Rules["spam"] = newBoolCheck(false)
- if check.Exec(target, make(map[string]interface{}), enforcer, "") {
- t.Error("RuleCheck rule false test failed")
- }
-
- enforcer.Rules = make(map[string]BaseCheck)
- enforcer.Rules["spam"] = newBoolCheck(true)
- if !check.Exec(target, make(map[string]interface{}), enforcer, "") {
- t.Error("RuleCheck rule true test failed")
- }
-}
-
-func TestRoleCheck(t *testing.T) {
- enforcer := Enforcer{}
- // Test Case 1
- check := NewRoleCheck("role", "sPaM")
- if !check.Exec(map[string]string{},
- map[string]interface{}{"roles": []string{"SpAm"}},
- enforcer, "") {
- t.Error("RoleCheck role accept test failed")
- }
- //Test case 2
- check = NewRoleCheck("role", "spam")
- if check.Exec(map[string]string{},
- map[string]interface{}{"roles": []string{}},
- enforcer, "") {
- t.Error("RoleCheck role reject test failed")
- }
-
- //Test case 3
- check = NewRoleCheck("role", "%(target.role.name)s")
- if !check.Exec(map[string]string{"target.role.name": "a"},
- map[string]interface{}{"user": "user", "roles": []string{"a", "b", "c"}},
- enforcer, "") {
- t.Error("RoleCheck format value key exist test failed")
- }
-
- //Test case 4
- check = NewRoleCheck("role", "%(target.role.name)s")
- if check.Exec(map[string]string{"target.role.name": "d"},
- map[string]interface{}{"user": "user", "roles": []string{"a", "b", "c"}},
- enforcer, "") {
- t.Error("RoleCheck format value key doesn`t exist test failed")
- }
-
- //Test case 5
- check = NewRoleCheck("role", "%(target.role.name)s")
- if check.Exec(map[string]string{},
- map[string]interface{}{},
- enforcer, "") {
- t.Error("RoleCheck format no roles test failed")
- }
-}
-
-func TestGenericCheck(t *testing.T) {
- enforcer := Enforcer{}
- // Test case 1: no cred check.
- check := NewGenericCheck("name", "%(name)s")
- if check.Exec(map[string]string{"name": "spam"},
- map[string]interface{}{},
- enforcer, "") {
- t.Error("GenericCheck no cred test failed")
- }
- // Test case 2: no cred check.
- if check.Exec(map[string]string{"name": "spam"},
- map[string]interface{}{"name": "ham"},
- enforcer, "") {
- t.Error("GenericCheck cred mismatch test failed")
- }
- // Test case 3: accept.
- if !check.Exec(map[string]string{"name": "spam"},
- map[string]interface{}{"name": "spam"},
- enforcer, "") {
- t.Error("GenericCheck cred mismatch test failed")
- }
- // Test case 4: no key match in target.
- if check.Exec(map[string]string{"name1": "spam"},
- map[string]interface{}{"name": "spam"},
- enforcer, "") {
- t.Error("GenericCheck no key match in target test failed")
- }
-
- // Test case 5: no key match in target.
- check = NewGenericCheck("'spam'", "%(name)s")
- if !check.Exec(map[string]string{"name": "spam"},
- map[string]interface{}{},
- enforcer, "") {
- t.Error("GenericCheck no key match in target test failed")
- }
-
- // Test case 6: constant literal mismatch.
- check = NewGenericCheck("'spam'", "%(name)s")
- if !check.Exec(map[string]string{"name": "spam"},
- map[string]interface{}{},
- enforcer, "") {
- t.Error("GenericCheck no key match in target test failed")
- }
-
- //Test case 7: test_constant_literal_mismatch
- check = NewGenericCheck("True", "%(enabled)s")
- if check.Exec(map[string]string{"enabled": "False"},
- map[string]interface{}{},
- enforcer, "") {
- t.Error("GenericCheck no key match in target test failed")
- }
- // Test case 8: test_constant_literal_accept
- check = NewGenericCheck("True", "%(enabled)s")
- if !check.Exec(map[string]string{"enabled": "True"},
- map[string]interface{}{},
- enforcer, "") {
- t.Error("GenericCheck no key match in target test failed")
- }
-
- // Test case 9: test_constant_literal_accept
- check = NewGenericCheck("a.b.c.d", "APPLES")
- cred := map[string]interface{}{
- "a": map[string]interface{}{
- "b": map[string]interface{}{
- "c": map[string]interface{}{
- "d": "APPLES",
- },
- },
- },
- }
- if !check.Exec(map[string]string{"enabled": "True"},
- cred,
- enforcer, "") {
- t.Error("GenericCheck no key match in target test failed")
- }
-
- cred = map[string]interface{}{
- "a": "APPLES",
- "o": map[string]interface{}{
- "t": "ORANGES",
- },
- }
- // Test case 10: test_missing_credentials_dictionary_lookup
- check = NewGenericCheck("o.t", "ORANGES")
- if !check.Exec(map[string]string{},
- cred,
- enforcer, "") {
- t.Error("GenericCheck no key match in target test failed")
- }
-
- // Test case 11: test_missing_credentials_dictionary_lookup
- check = NewGenericCheck("o.v", "ORANGES")
- if check.Exec(map[string]string{},
- cred,
- enforcer, "") {
- t.Error("GenericCheck no key match in target test failed")
- }
- // Test case 12: test_missing_credentials_dictionary_lookup
- check = NewGenericCheck("q.v", "ORANGES")
- if check.Exec(map[string]string{},
- cred,
- enforcer, "") {
- t.Error("GenericCheck no key match in target test failed")
- }
-
- // Test case 13: test_single_entry_in_list_accepted
- cred = map[string]interface{}{
- "a": map[string]interface{}{
- "b": map[string]interface{}{
- "c": map[string]interface{}{
- "d": []string{"APPLES"},
- },
- },
- },
- }
- check = NewGenericCheck("a.b.c.d", "APPLES")
- if !check.Exec(map[string]string{},
- cred,
- enforcer, "") {
- t.Error("GenericCheck no key match in target test failed")
- }
- // Test case 14: test_multiple_entry_in_list_accepted
- cred = map[string]interface{}{
- "a": map[string]interface{}{
- "b": map[string]interface{}{
- "c": map[string]interface{}{
- "d": []string{"Bananas", "APPLES", "Grapes"},
- },
- },
- },
- }
- check = NewGenericCheck("a.b.c.d", "APPLES")
- if !check.Exec(map[string]string{},
- cred,
- enforcer, "") {
- t.Error("GenericCheck no key match in target test failed")
- }
-
- //Test case 15: test_multiple_entry_in_list_accepted
- cred = map[string]interface{}{
- "a": map[string]interface{}{
- "b": []interface{}{
- map[string]interface{}{
- "c": map[string]interface{}{
- "d": []string{"Bananas", "APPLES", "Grapes"},
- },
- },
- },
- },
- }
- check = NewGenericCheck("a.b.c.d", "APPLES")
- if !check.Exec(map[string]string{},
- cred,
- enforcer, "") {
- t.Error("GenericCheck no key match in target test failed")
- }
-}
-
-func TestFalseCheck(t *testing.T) {
- check := NewFalseCheck()
- if "!" != check.String() {
- t.Errorf("FalseCheck failed.")
- }
- enforcer := Enforcer{}
- if check.Exec(map[string]string{}, map[string]interface{}{}, enforcer, "") {
- t.Errorf("FalseCheck failed.")
- }
-}
-
-func TestTrueCheck(t *testing.T) {
- check := NewTrueCheck()
- if "@" != check.String() {
- t.Errorf("TrueCheck failed.")
- }
- enforcer := Enforcer{}
- if !check.Exec(map[string]string{}, map[string]interface{}{}, enforcer, "") {
- t.Errorf("TrueCheck failed.")
- }
-}
-
-func TestNotCheck(t *testing.T) {
- enforcer := Enforcer{}
- check := NewNotCheck(NewTrueCheck())
- if "not @" != check.String() {
- t.Errorf("NotCheck string test failed.")
- }
- if check.Exec(map[string]string{}, map[string]interface{}{}, enforcer, "") {
- t.Errorf("NotCheck exeute test failed.")
- }
-}
-
-func TestAndCheck(t *testing.T) {
- // Test case 1
- c1 := NewTrueCheck()
- c2 := NewTrueCheck()
- c3 := NewTrueCheck()
- check := NewAndCheck(c1, c2)
- if !reflect.DeepEqual(check.rules, []BaseCheck{c1, c2}) {
- t.Errorf("AndCheck new test failed")
- }
- check.AddCheck(c3)
- if !reflect.DeepEqual(check.rules, []BaseCheck{c1, c2, c3}) {
- t.Errorf("AndCheck add check test failed")
- }
- if check.String() != "(@ and @ and @)" {
- t.Errorf("AndCheck string test failed")
- }
- //first true
- b1 := newBoolCheck(true)
- b2 := newBoolCheck(false)
- check = NewAndCheck(b1, b2)
- if check.Exec(map[string]string{}, map[string]interface{}{}, Enforcer{}, "") {
- t.Errorf("AndCheck call first true test failed")
- }
- if !(check.rules[0].(*boolCheck).called && check.rules[1].(*boolCheck).called) {
- t.Errorf("AndCheck call first true test failed")
- }
-
- // second true
- b1 = newBoolCheck(false)
- b2 = newBoolCheck(true)
- check = NewAndCheck(b1, b2)
- if check.Exec(map[string]string{}, map[string]interface{}{}, Enforcer{}, "") {
- t.Errorf("AndCheck call second true test failed")
- }
- if !(check.rules[0].(*boolCheck).called && !check.rules[1].(*boolCheck).called) {
- t.Errorf("AndCheck call second true test failed")
- }
-}
-
-func TestOrCheck(t *testing.T) {
- // Test case 1
- c1 := NewTrueCheck()
- c2 := NewTrueCheck()
- c3 := NewTrueCheck()
- check := NewOrCheck(c1, c2)
- if !reflect.DeepEqual(check.rules, []BaseCheck{c1, c2}) {
- t.Errorf("OrCheck new test failed")
- }
- check.AddCheck(c3)
- if !reflect.DeepEqual(check.rules, []BaseCheck{c1, c2, c3}) {
- t.Errorf("OrCheck add check test failed")
- }
- if check.String() != "(@ or @ or @)" {
- t.Errorf("OrCheck string test failed")
- }
- _, check1 := check.PopCheck()
- if !reflect.DeepEqual(check.rules, []BaseCheck{c1, c2}) {
- t.Errorf("OrCheck pop check test failed")
- }
- if !reflect.DeepEqual(check1, c3) {
- t.Errorf("OrCheck pop check test failed")
- }
- // all false
- check = NewOrCheck(newBoolCheck(false), newBoolCheck(false))
- if check.Exec(map[string]string{}, map[string]interface{}{}, Enforcer{}, "") {
- t.Errorf("OrCheck call all false test failed")
- }
- if !(check.rules[0].(*boolCheck).called && check.rules[1].(*boolCheck).called) {
- t.Errorf("OrCheck call all false test failed")
- }
-
- // first false
- check = NewOrCheck(newBoolCheck(false), newBoolCheck(true))
- if !check.Exec(map[string]string{}, map[string]interface{}{}, Enforcer{}, "") {
- t.Errorf("OrCheck call first false test failed")
- }
- if !(check.rules[0].(*boolCheck).called && check.rules[1].(*boolCheck).called) {
- t.Errorf("OrCheck call first false test failed")
- }
-
- // second false
- check = NewOrCheck(newBoolCheck(true), newBoolCheck(false))
- if !check.Exec(map[string]string{}, map[string]interface{}{}, Enforcer{}, "") {
- t.Errorf("OrCheck call second false test failed")
- }
- if !(check.rules[0].(*boolCheck).called && !check.rules[1].(*boolCheck).called) {
- t.Errorf("OrCheck call second false test failed")
- }
-}
diff --git a/pkg/api/policy/parser.go b/pkg/api/policy/parser.go
deleted file mode 100644
index 5b0267173..000000000
--- a/pkg/api/policy/parser.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package policy
-
-import (
- "fmt"
- "strings"
-
- "github.com/opensds/opensds/pkg/utils"
-)
-
-type TokenPair struct {
- token string
- value interface{}
-}
-
-func parseCheck(rule string) BaseCheck {
- if rule == "!" {
- return &FalseCheck{}
- } else if rule == "@" {
- return &TrueCheck{}
- }
- items := strings.SplitN(rule, ":", 2)
- if len(items) != 2 {
- return &FalseCheck{}
- }
- kind, match := items[0], items[1]
- if check, ok := registeredChecks[kind]; ok {
- return check(kind, match)
- } else if check, ok := registeredChecks["generic"]; ok {
- return check(kind, match)
- } else {
- return &FalseCheck{}
- }
-}
-
-func parseTokenize(rule string) []TokenPair {
- var tokPairs []TokenPair
- for _, tok := range strings.Fields(rule) {
- if tok == "" {
- continue
- }
-
- clean := strings.TrimLeft(tok, "(")
- for i := 0; i < len(tok)-len(clean); i++ {
- tokPairs = append(tokPairs, TokenPair{"(", "("})
- }
-
- // If it was only parentheses, continue
- if clean == "" {
- continue
- }
-
- tok = clean
- // Handle trailing parens on the token
- clean = strings.TrimRight(tok, ")")
- trail := len(tok) - len(clean)
- lowered := strings.ToLower(clean)
-
- if utils.Contained(lowered, []string{"and", "or", "not"}) {
- tokPairs = append(tokPairs, TokenPair{lowered, clean})
- } else if clean != "" {
- s := fmt.Sprintf("%c%c", tok[0], tok[len(tok)-1])
- if len(tok) >= 2 && (s == "\"\"" || s == "''") {
- tokPairs = append(tokPairs, TokenPair{"string", tok[1 : len(tok)-1]})
- } else {
- tokPairs = append(tokPairs, TokenPair{"check", parseCheck(clean)})
- }
- }
-
- for i := 0; i < trail; i++ {
- tokPairs = append(tokPairs, TokenPair{")", ")"})
- }
- }
-
- return tokPairs
-}
-
-func parseRule(rule string) BaseCheck {
- if rule == "" {
- return &TrueCheck{}
- }
- state := NewParseState()
- tokPairs := parseTokenize(rule)
- for _, tp := range tokPairs {
- state.Shift(tp.token, tp.value)
- }
- if result, err := state.Result(); err == nil {
- return result.(BaseCheck)
- }
- return &FalseCheck{}
-}
-
-var ReduceFuncMap = map[string]ReduceFunc{
- "(,check,)": wrapCheck,
- "(,and_expr,)": wrapCheck,
- "(,or_expr,)": wrapCheck,
- "check,and,check": makeAndExpr,
- "or_expr,and,check": mixOrAndExpr,
- "and_expr,and,check": extendAndExpr,
- "check,or,check": makeOrExpr,
- "and_expr,or,check": makeOrExpr,
- "or_expr,or,check": extendOrExpr,
- "not,check": makeNotExpr,
-}
-
-func NewParseState() *ParseState {
- return &ParseState{}
-}
-
-type ParseState struct {
- tokens []string
- values []interface{}
-}
-
-type ReduceFunc func(args ...interface{}) []TokenPair
-
-func (p *ParseState) reduce() {
- tokenStr := strings.Join(p.tokens, ",")
- for key, fun := range ReduceFuncMap {
- if strings.HasSuffix(tokenStr, key) {
- argNum := strings.Count(key, ",") + 1
- argIdx := len(p.values) - argNum
- args := p.values[argIdx:]
- results := fun(args...)
- p.tokens = append(p.tokens[:argIdx], results[0].token)
- p.values = append(p.values[:argIdx], results[0].value)
- p.reduce()
- }
- }
-}
-
-func (p *ParseState) Shift(tok string, val interface{}) {
- p.tokens = append(p.tokens, tok)
- p.values = append(p.values, val)
- p.reduce()
-}
-
-func (p *ParseState) Result() (interface{}, error) {
- if len(p.values) != 1 {
- return nil, fmt.Errorf("Could not parse rule")
- }
- return p.values[0], nil
-}
-
-func wrapCheck(args ...interface{}) []TokenPair {
- check := args[1].(BaseCheck)
- return []TokenPair{{"check", check}}
-}
-
-func makeAndExpr(args ...interface{}) []TokenPair {
- check1 := args[0].(BaseCheck)
- check2 := args[2].(BaseCheck)
- return []TokenPair{{"and_expr", NewAndCheck(check1, check2)}}
-}
-
-func mixOrAndExpr(args ...interface{}) []TokenPair {
- orExpr := args[0].(*OrCheck)
- check := args[2].(BaseCheck)
- var andExpr *AndCheck
- orExpr, check1 := orExpr.PopCheck()
- if v, ok := check1.(*AndCheck); ok {
- andExpr = v
- andExpr.AddCheck(check)
- } else {
- andExpr = NewAndCheck(check1, check)
- }
- return []TokenPair{{"or_expr", orExpr.AddCheck(andExpr)}}
-}
-
-func extendAndExpr(args ...interface{}) []TokenPair {
- andExpr := args[0].(*AndCheck)
- check2 := args[2].(BaseCheck)
- return []TokenPair{{"and_expr", andExpr.AddCheck(check2)}}
-}
-
-func makeOrExpr(args ...interface{}) []TokenPair {
- check1 := args[0].(BaseCheck)
- check2 := args[2].(BaseCheck)
- return []TokenPair{{"or_expr", NewOrCheck(check1, check2)}}
-}
-
-func extendOrExpr(args ...interface{}) []TokenPair {
- orExpr := args[0].(*OrCheck)
- check := args[2].(BaseCheck)
- return []TokenPair{{"or_expr", orExpr.AddCheck(check)}}
-}
-
-func makeNotExpr(args ...interface{}) []TokenPair {
- return []TokenPair{{"check", NewNotCheck(args[1].(BaseCheck))}}
-}
diff --git a/pkg/api/policy/parser_test.go b/pkg/api/policy/parser_test.go
deleted file mode 100644
index 59649af81..000000000
--- a/pkg/api/policy/parser_test.go
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package policy
-
-import (
- "reflect"
- "testing"
-)
-
-func TestParseCheck(t *testing.T) {
- check := parseCheck("!")
- if reflect.TypeOf(check) != reflect.TypeOf(&FalseCheck{}) {
- t.Errorf("Parse check \"!\" failed")
- }
-
- check = parseCheck("@")
- if reflect.TypeOf(check) != reflect.TypeOf(&TrueCheck{}) {
- t.Errorf("Parse check \"@\" failed")
- }
-
- check = parseCheck("rule:handler")
- if reflect.TypeOf(check) != reflect.TypeOf(&RuleCheck{}) {
- t.Errorf("Parse check rule failed")
- }
-
- check = parseCheck("role:handler")
- if reflect.TypeOf(check) != reflect.TypeOf(&RoleCheck{}) {
- t.Errorf("Parse check role failed")
- }
- check = parseCheck("no:handler")
- if reflect.TypeOf(check) != reflect.TypeOf(&GenericCheck{}) {
- t.Errorf("Parse check generic failed")
- }
- check = parseCheck("foobar")
- if reflect.TypeOf(check) != reflect.TypeOf(&FalseCheck{}) {
- t.Errorf("Parse check bad rule failed")
- }
- delete(registeredChecks, "Generic")
- check = parseCheck("foobar")
- if reflect.TypeOf(check) != reflect.TypeOf(&FalseCheck{}) {
- t.Errorf("Parse check bad rule failed")
- }
-}
-func TestParseTokenize(t *testing.T) {
- exemplar := ("(( ( ((() And)) or ) (check:%(miss)s) not)) 'a-string' \"another-string\"")
- results := parseTokenize(exemplar)
- expected := []TokenPair{
- {"(", "("}, {"(", "("}, {"(", "("}, {"(", "("},
- {"(", "("}, {"(", "("}, {")", ")"}, {"and", "And"},
- {")", ")"}, {")", ")"}, {"or", "or"}, {")", ")"},
- {"(", "("}, {"check", "check:%(miss)s"}, {")", ")"},
- {"not", "not"}, {")", ")"}, {")", ")"},
- {"string", "a-string"}, {"string", "another-string"}}
- // please synchronized update the index when if modified the unit test.
- results[13].value = "check:%(miss)s"
- for i := range results {
- if results[i].token != expected[i].token || results[i].value != expected[i].value {
- t.Errorf("Test parseTokenize failed, results:%v, expected:%v", results[i], expected[i])
- }
- }
-}
-
-func TestParseState(t *testing.T) {
- state := NewParseState()
- state.tokens = []string{"tok2"}
- state.values = []interface{}{"val2"}
- state.reduce()
-}
-
-func TestParseRule(t *testing.T) {
- // test case 1: "a or b or c".
- result := parseRule("@ or ! or @")
- if result.String() != "(@ or ! or @)" {
- t.Error("Parse rule in 'a or b or c' case failed")
- }
-
- // test case 2: "a or b and c".
- result = parseRule("@ or ! and @")
- if result.String() != "(@ or (! and @))" {
- t.Error("Parse rule in 'a or b and c' case failed")
- }
-
- // test case 3: "a and b or c".
- result = parseRule("@ and ! or @")
- if result.String() != "((@ and !) or @)" {
- t.Error("Parse rule in 'a and b or c' case failed")
- }
-
- // test case 4: "a and b and c".
- result = parseRule("@ and ! and @")
- if result.String() != "(@ and ! and @)" {
- t.Error("Parse rule in 'a and b and c' case failed")
- }
-
- // test case 5: "a or b or c or d" .
- result = parseRule("@ or ! or @ or !")
- if result.String() != "(@ or ! or @ or !)" {
- t.Error("Parse rule in 'a or b or c or d' case failed")
- }
-
- // test case 6: "a or b or c and d" .
- result = parseRule("@ or ! or @ and !")
- if result.String() != "(@ or ! or (@ and !))" {
- t.Error("Parse rule in 'a or b or c and d' case failed")
- }
-
- // test case 7: "a or b and c or d" .
- result = parseRule("@ or ! and @ or !")
- if result.String() != "(@ or (! and @) or !)" {
- t.Error("Parse rule in 'a or b and c or d' case failed")
- }
-
- // test case 8: "a or b and c and d" .
- result = parseRule("@ or ! and @ and !")
- if result.String() != "(@ or (! and @ and !))" {
- t.Error("Parse rule in 'a or b and c and d' case failed")
- }
-
- // test case 9: "a and b or c or d" .
- result = parseRule("@ and ! or @ or !")
- if result.String() != "((@ and !) or @ or !)" {
- t.Error("Parse rule in 'a and b or c or d' case failed")
- }
-
- // test case 10: "a and b or c and d" .
- result = parseRule("@ and ! or @ and !")
- if result.String() != "((@ and !) or (@ and !))" {
- t.Error("Parse rule in 'a and b or c and d' case failed")
- }
-
- // test case 11: "a and b and c or d" .
- result = parseRule("@ and ! and @ or !")
- if result.String() != "((@ and ! and @) or !)" {
- t.Error("Parse rule in 'a and b and c or d' case failed")
- }
-
- // test case 12: "a and b and c and d" .
- result = parseRule("@ and ! and @ and !")
- if result.String() != "(@ and ! and @ and !)" {
- t.Error("Parse rule in 'a and b and c and d' case failed")
- }
-
- // test case 13: "a and b or with not 1" .
- result = parseRule("not @ and ! or @")
- if result.String() != "((not @ and !) or @)" {
- t.Error("Parse rule in 'a and b or with not 1' case failed")
- }
-
- // test case 14: "a and b or with not 2" .
- result = parseRule("@ and not ! or @")
- if result.String() != "((@ and not !) or @)" {
- t.Error("Parse rule in 'a and b or with not 2' case failed")
- }
-
- // test case 15: "a and b or with not 3" .
- result = parseRule("@ and ! or not @")
- if result.String() != "((@ and !) or not @)" {
- t.Error("Parse rule in 'a and b or with not 3' case failed")
- }
-
- // test case 16: "a and b and c with group" .
- rules := []string{
- "@ and ( ! ) or @",
- "@ and ! or ( @ )",
- "( @ ) and ! or ( @ )",
- "@ and ( ! ) or ( @ )",
- "( @ ) and ( ! ) or ( @ )",
- "( @ and ! ) or @",
- "( ( @ ) and ! ) or @",
- "( @ and ( ! ) ) or @",
- "( ( @ and ! ) ) or @",
- "( @ and ! or @ )"}
- for _, r := range rules {
- result = parseRule(r)
- if result.String() != "((@ and !) or @)" {
- t.Error("Parse rule in 'a and b and c with group' case failed")
- }
- }
-
- // test case 17: "a and b or c with group and not" .
- rules = []string{
- "not ( @ ) and ! or @",
- "not @ and ( ! ) or @",
- "not @ and ! or ( @ )",
- "( not @ ) and ! or @",
- "( not @ and ! ) or @",
- "( not @ and ! or @ )"}
-
- for _, r := range rules {
- result = parseRule(r)
- if result.String() != "((not @ and !) or @)" {
- t.Error("Parse rule in 'a and b and c with group and not'case failed")
- }
- }
-
- // test case 18: "a and b or c with group and not 2" .
- result = parseRule("not @ and ( ! or @ )")
- if result.String() != "(not @ and (! or @))" {
- t.Error("Parse rule in 'a and b or c with group and not 2' case failed")
- }
-
- // test case 19: "a and b or c with group and not 3" .
- result = parseRule("not ( @ and ! or @ )")
- if result.String() != "not ((@ and !) or @)" {
- t.Error("Parse rule in 'a and b or c with group and not 3' case failed")
- }
-
- // test case 20: "a and b or c with group and not 4" .
- rules = []string{
- "( @ ) and not ! or @",
- "@ and ( not ! ) or @",
- "@ and not ( ! ) or @",
- "@ and not ! or ( @ )",
- "( @ and not ! ) or @",
- "( @ and not ! or @ )"}
-
- for _, r := range rules {
- result = parseRule(r)
- if result.String() != "((@ and not !) or @)" {
- t.Error("Parse rule in 'a and b and c with group and not 4'case failed")
- }
- }
-
- // test case 21: "a and b or c with group and not 5" .
- result = parseRule("@ and ( not ! or @ )")
- if result.String() != "(@ and (not ! or @))" {
- t.Error("Parse rule in 'a and b or c with group and not 5' case failed")
- }
-
- // test case 22: "a and b or c with group and not 6" .
- result = parseRule("@ and not ( ! or @ )")
- if result.String() != "(@ and not (! or @))" {
- t.Error("Parse rule in 'a and b or c with group and not 6' case failed")
- }
-
- // test case 23: "a and b or c with group and not 7" .
- rules = []string{
- "( @ ) and ! or not @",
- "@ and ( ! ) or not @",
- "@ and ! or not ( @ )",
- "@ and ! or ( not @ )",
- "( @ and ! ) or not @",
- "( @ and ! or not @ )"}
-
- for _, r := range rules {
- result = parseRule(r)
- if result.String() != "((@ and !) or not @)" {
- t.Error("Parse rule in 'a and b and c with group and not 7'case failed")
- }
- }
-
- // test case 24: "a and b or c with group and not 8" .
- result = parseRule("@ and ( ! or not @ )")
- if result.String() != "(@ and (! or not @))" {
- t.Error("Parse rule in 'a and b or c with group and not 8' case failed")
- }
-}
diff --git a/pkg/api/policy/policy.go b/pkg/api/policy/policy.go
deleted file mode 100644
index 608cd4e6e..000000000
--- a/pkg/api/policy/policy.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package policy
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
- "strings"
-
- bctx "github.com/astaxie/beego/context"
- log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/opensds/opensds/pkg/utils/config"
-)
-
-var enforcer *Enforcer
-
-func init() {
- enforcer = NewEnforcer(false)
- RegisterRules(enforcer)
- enforcer.LoadRules(false)
-}
-
-type DefaultRule struct {
- Name string
- CheckStr string
-}
-
-func listRules() []DefaultRule {
- return []DefaultRule{
- {Name: "context_is_admin", CheckStr: "role:admin"},
- }
-}
-
-func RegisterRules(e *Enforcer) {
- e.RegisterDefaults(listRules())
-}
-
-func NewEnforcer(overWrite bool) *Enforcer {
- return &Enforcer{OverWrite: overWrite}
-}
-
-type Enforcer struct {
- Rules map[string]BaseCheck
- DefaultRules []DefaultRule
- OverWrite bool
-}
-
-func (e *Enforcer) RegisterDefaults(rules []DefaultRule) {
- e.DefaultRules = rules
-}
-
-func (e *Enforcer) Enforce(rule string, target map[string]string, cred map[string]interface{}) (bool, error) {
- if err := e.LoadRules(false); err != nil {
- return false, err
- }
-
- toRule, ok := e.Rules[rule]
- if !ok {
- err := fmt.Errorf("rule [%s] does not exist", rule)
- return false, err
- }
- return check(toRule, target, cred, *e, ""), nil
-}
-
-func (e *Enforcer) Authorize(rule string, target map[string]string, cred map[string]interface{}) (bool, error) {
- return e.Enforce(rule, target, cred)
-}
-
-func (e *Enforcer) LoadRules(forcedReload bool) error {
- path := config.CONF.OsdsApiServer.PolicyPath
- fileInfo, err := os.Stat(path)
- if err != nil {
- return err
- }
- // Load all policy files that in the specified path
- if fileInfo.IsDir() {
- files, err := ioutil.ReadDir(path)
- if err != nil {
- return err
- }
- for _, f := range files {
- if !f.IsDir() && strings.HasSuffix(f.Name(), ".json") {
- err := e.LoadPolicyFile(path, forcedReload, false)
- if err != nil {
- return err
- }
- }
- }
- return nil
- }
- return e.LoadPolicyFile(path, forcedReload, e.OverWrite)
-}
-
-func (e *Enforcer) UpdateRules(rules map[string]BaseCheck) {
- if e.Rules == nil {
- e.Rules = make(map[string]BaseCheck)
- }
- for k, c := range rules {
- e.Rules[k] = c
- }
-}
-
-func (e *Enforcer) LoadPolicyFile(path string, forcedReload bool, overWrite bool) error {
- // if rules is already set or user doesn't want to force reload, return it.
- if e.Rules != nil && !forcedReload {
- return nil
- }
-
- data, err := ioutil.ReadFile(path)
- if err != nil {
- msg := fmt.Sprintf("Read policy file (%s) failed, reason:(%v)", path, err)
- log.Error(msg)
- return fmt.Errorf(msg)
- }
-
- r, err := NewRules(data, e.DefaultRules)
- if err != nil {
- return err
- }
-
- if overWrite {
- e.Rules = r.Rules
- } else {
- e.UpdateRules(r.Rules)
- }
- return nil
-}
-
-func NewRules(data []byte, defaultRule []DefaultRule) (*Rules, error) {
- r := &Rules{}
- err := r.Load(data, defaultRule)
- return r, err
-}
-
-type Rules struct {
- Rules map[string]BaseCheck
-}
-
-func (r *Rules) Load(data []byte, defaultRules []DefaultRule) error {
- rulesMap := map[string]string{}
- err := json.Unmarshal(data, &rulesMap)
- if err != nil {
- log.Error(err.Error())
- return err
- }
- // add default value
- for _, r := range defaultRules {
- if v, ok := rulesMap[r.Name]; ok {
- log.Warningf("Policy rule (%s:%s) has conflict with default rule(%s:%s),abandon default value\n",
- r.Name, v, r.Name, r.CheckStr)
- } else {
- rulesMap[r.Name] = r.CheckStr
- }
- }
-
- if r.Rules == nil {
- r.Rules = make(map[string]BaseCheck)
- }
- for k, v := range rulesMap {
- r.Rules[k] = parseRule(v)
- }
- return nil
-}
-
-func (r *Rules) String() string {
- b, _ := json.MarshalIndent(r.Rules, "", " ")
- return string(b)
-}
-
-func Authorize(httpCtx *bctx.Context, action string) bool {
- if config.CONF.AuthStrategy != "keystone" {
- return true
- }
- ctx := context.GetContext(httpCtx)
- credentials := ctx.ToPolicyValue()
- TenantId := httpCtx.Input.Param(":tenantId")
-
- target := map[string]string{
- "tenant_id": TenantId,
- }
-
- log.V(8).Infof("Action: %v", action)
- log.V(8).Infof("Target: %v", target)
- log.V(8).Infof("Credentials: %v", credentials)
- ok, err := enforcer.Authorize(action, target, credentials)
- if err != nil {
- log.Errorf("Authorize failed, %s", err)
- }
- if !ok {
- context.HttpError(httpCtx, http.StatusForbidden, "operation is not permitted")
- } else {
- ctx.IsAdmin = utils.Contained("admin", ctx.Roles)
- }
- return ok
-}
diff --git a/pkg/api/policy/policy_test.go b/pkg/api/policy/policy_test.go
deleted file mode 100644
index 90c4ae179..000000000
--- a/pkg/api/policy/policy_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package policy
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "strings"
- "testing"
- "time"
-)
-
-type Project struct {
- ID string `json:"id"`
- Name string `json:"name"`
-}
-
-type User struct {
- ID string `json:"id"`
- Name string `json:"name"`
-}
-
-type Roles struct {
- ID string `json:"id"`
- Name string `json:"name"`
-}
-
-type Token struct {
- AuditIds []string `json:"audit_ids"`
- Catalog []interface{} `json:"catalog"`
- ID string `json:"id"`
- ExpiresAt time.Time `json:"expires_at"`
- IsAdmin bool `json:"is_domain"`
- Project Project `json:"project"`
- User User `json:"user"`
- Roles []Roles `json:"roles"`
-}
-
-func TestPolicy(t *testing.T) {
- p := "./testdata/token.json"
- body, err := ioutil.ReadFile(p)
- if err != nil {
- fmt.Printf("Read token json file (%s) failed, reason:(%v)\n", p, err)
- return
- }
-
- var m map[string]interface{}
- err = json.Unmarshal([]byte(body), &m)
- var to Token
- b, err := json.Marshal(m["token"])
- err = json.Unmarshal(b, &to)
-
- p = "./testdata/policy.json"
- data, err := ioutil.ReadFile(p)
- if err != nil {
- fmt.Printf("Read token json file (%s) failed, reason:(%v)\n", p, err)
- return
- }
- target := map[string]string{"project_id": to.Project.ID}
-
- var roles []string
- for _, v := range to.Roles {
- roles = append(roles, v.Name)
- }
- cred := map[string]interface{}{
- "roles": roles,
- "project_id": to.Project.ID,
- "is_admin": to.IsAdmin,
- }
- // The golang testing framework dosen't invoke init function in linux system,so invoke it.
- registerAll()
- rules, _ := NewRules(data, listRules())
- enforcer := NewEnforcer(false)
- enforcer.Rules = rules.Rules
- expected := map[string]bool{
- "volume:create": true,
- "volume:delete": true,
- "volume:get": false,
- "volume:get_all": true,
- }
- for k, r := range rules.Rules {
- if strings.Contains(k, ":") {
- result := r.Exec(target, cred, *enforcer, "")
- if result != expected[k] {
- t.Errorf("Policy checked failed,\"%s\": \"%s\", expected:%v, got:%v", k, r, expected[k], result)
- }
- }
- }
-}
diff --git a/pkg/api/policy/testdata/policy.json b/pkg/api/policy/testdata/policy.json
deleted file mode 100644
index 671329652..000000000
--- a/pkg/api/policy/testdata/policy.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "admin_or_owner": "is_admin:True or (role:admin and is_admin_project:True) or project_id:%(project_id)s",
- "default": "rule:admin_or_owner",
- "admin_api": "is_admin:True or (role:admin and is_admin_project:True)",
-
- "volume:create": "",
- "volume:delete": "rule:admin_or_owner",
- "volume:get": "rule:admin_api",
- "volume:get_all": "rule:default"
-}
diff --git a/pkg/api/policy/testdata/token.json b/pkg/api/policy/testdata/token.json
deleted file mode 100644
index 77d6d2f3c..000000000
--- a/pkg/api/policy/testdata/token.json
+++ /dev/null
@@ -1,135 +0,0 @@
-{
- "token": {
- "methods": [
- "password"
- ],
- "roles": [
- {
- "name": "member",
- "id": "bb8d0f54-a1c9-444f-81da-231aef4d7e03"
- }
- ],
- "expires_at": "2038-01-18T21:14:07Z",
- "issued_at": "2000-01-18T21:14:07Z",
- "project": {
- "id": "tenant_id1",
- "domain": {
- "id": "domain_id1",
- "name": "domain_name1"
- },
- "enabled": true,
- "description": null,
- "name": "tenant_name1"
- },
- "catalog": [
- {
- "endpoints": [
- {
- "id": "f84e070735e54914b41e2b5cfa94dcf7",
- "interface": "admin",
- "url": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a",
- "region": "regionOne"
- },
- {
- "id": "8220bba1d2844e0b81b171c6ede1155f",
- "interface": "internal",
- "url": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a",
- "region": "regionOne"
- },
- {
- "id": "719b92ea82a04e7a9ff1107c62da10da",
- "interface": "public",
- "url": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a",
- "region": "regionOne"
- }
- ],
- "type": "volume",
- "name": "volume",
- "id":"547e9195d1914b5eb087bedbc98fccc3"
- },
- {
- "endpoints": [
- {
- "id": "44752324c0d44375bc854168ea22f1fc",
- "interface": "admin",
- "url": "http://127.0.0.1:9292/v1",
- "region": "regionOne"
- },
- {
- "id": "a59b3734f57449078f1637c10f96c8e8",
- "interface": "internal",
- "url": "http://127.0.0.1:9292/v1",
- "region": "regionOne"
- },
- {
- "id": "16c3ab1a4df640569812e432c98b2a48",
- "interface": "public",
- "url": "http://127.0.0.1:9292/v1",
- "region": "regionOne"
- }
- ],
- "type": "image",
- "name": "glance",
- "id": "22c15d232e55419eb4aeb3ebbd12aac2"
- },
- {
- "endpoints": [
- {
- "id": "9c2fdc2d45bb45c5a7f973e235e0f998",
- "interface": "admin",
- "url": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a",
- "region": "regionOne"
- },
- {
- "id": "88ccfa8cbb7743998b38b998f4e6a720",
- "interface": "internal",
- "url": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a",
- "region": "regionOne"
- },
- {
- "id": "113ee928c6934c92b9a12bd4e456c804",
- "interface": "public",
- "url": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a",
- "region": "regionOne"
- }
- ],
- "type": "compute",
- "name": "nova",
- "id": "fbf2afcdeb10473392636df9785d3fb5"
- },
- {
- "endpoints": [
- {
- "id": "c10a5cda00784049953296d18464aa38",
- "interface": "admin",
- "url": "http://127.0.0.1:35357/v3",
- "region": "RegionOne"
- },
- {
- "id": "334650263e064428bb2f0b7c3c7a743c",
- "interface": "internal",
- "url": "http://127.0.0.1:35357/v3",
- "region": "RegionOne"
- },
- {
- "id": "52ff54addc38430d9b656c7164e2caf8",
- "interface": "public",
- "url": "http://127.0.0.1:5000/v3",
- "region": "RegionOne"
- }
- ],
- "type": "identity",
- "name": "keystone",
- "id": "a0d9913a4bca4d5699e151804e0b5172"
- }
- ],
- "user": {
- "domain": {
- "id": "domain_id1",
- "name": "domain_name1"
- },
- "name": "user_name1",
- "id": "user_id1"
- }
- }
-}
diff --git a/pkg/api/routers/alert.go b/pkg/api/routers/alert.go
deleted file mode 100644
index 168cb2a05..000000000
--- a/pkg/api/routers/alert.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package routers
-
-import (
- "github.com/astaxie/beego"
- "github.com/opensds/opensds/pkg/api/controllers"
- "github.com/opensds/opensds/pkg/utils/constants"
-)
-
-func init() {
-
- // add router for alert api
- alertns :=
- beego.NewNamespace("/"+constants.APIVersion,
-
- // All operations of alert can be used for both admin and users.
- beego.NSRouter("/alert", controllers.NewAlertPortal(), "post:CreateAlert"),
- )
- beego.AddNamespace(alertns)
-}
diff --git a/pkg/api/routers/block.go b/pkg/api/routers/block.go
deleted file mode 100755
index 78ba63331..000000000
--- a/pkg/api/routers/block.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package routers
-
-import (
- "github.com/astaxie/beego"
- "github.com/opensds/opensds/pkg/api/controllers"
- "github.com/opensds/opensds/pkg/utils/constants"
-)
-
-func init() {
-
- // add router for block storage api
- blockns :=
- beego.NewNamespace("/"+constants.APIVersion+"/:tenantId/block",
-
- // Volume is the logical description of a piece of storage, which can be directly used by users.
- // All operations of volume can be used for both admin and users.
- beego.NSRouter("/volumes", controllers.NewVolumePortal(), "post:CreateVolume;get:ListVolumes"),
- beego.NSRouter("/volumes/:volumeId", controllers.NewVolumePortal(), "get:GetVolume;put:UpdateVolume;delete:DeleteVolume"),
- // Extend Volume
- beego.NSRouter("/volumes/:volumeId/resize", controllers.NewVolumePortal(), "post:ExtendVolume"),
-
- // Creates, shows, lists, unpdates and deletes attachment.
- beego.NSRouter("/attachments", controllers.NewVolumeAttachmentPortal(), "post:CreateVolumeAttachment;get:ListVolumeAttachments"),
- beego.NSRouter("/attachments/:attachmentId", controllers.NewVolumeAttachmentPortal(), "get:GetVolumeAttachment;put:UpdateVolumeAttachment;delete:DeleteVolumeAttachment"),
-
- // Snapshot is a point-in-time copy of the data that a volume contains.
- // Creates, shows, lists, unpdates and deletes snapshot.
- beego.NSRouter("/snapshots", controllers.NewVolumeSnapshotPortal(), "post:CreateVolumeSnapshot;get:ListVolumeSnapshots"),
- beego.NSRouter("/snapshots/:snapshotId", controllers.NewVolumeSnapshotPortal(), "get:GetVolumeSnapshot;put:UpdateVolumeSnapshot;delete:DeleteVolumeSnapshot"),
-
- // Creates, shows, lists, unpdates and deletes replication.
- beego.NSRouter("/replications", controllers.NewReplicationPortal(), "post:CreateReplication;get:ListReplications"),
- beego.NSRouter("/replications/detail", controllers.NewReplicationPortal(), "get:ListReplicationsDetail"),
- beego.NSRouter("/replications/:replicationId", controllers.NewReplicationPortal(), "get:GetReplication;put:UpdateReplication;delete:DeleteReplication"),
- beego.NSRouter("/replications/:replicationId/enable", controllers.NewReplicationPortal(), "post:EnableReplication"),
- beego.NSRouter("/replications/:replicationId/disable", controllers.NewReplicationPortal(), "post:DisableReplication"),
- beego.NSRouter("/replications/:replicationId/failover", controllers.NewReplicationPortal(), "post:FailoverReplication"),
- // Volume group contains a list of volumes that are used in the same application.
- beego.NSRouter("/volumeGroups", controllers.NewVolumeGroupPortal(), "post:CreateVolumeGroup;get:ListVolumeGroups"),
- beego.NSRouter("/volumeGroups/:groupId", controllers.NewVolumeGroupPortal(), "put:UpdateVolumeGroup;get:GetVolumeGroup;delete:DeleteVolumeGroup"),
- )
- beego.AddNamespace(blockns)
-}
diff --git a/pkg/api/routers/fileshare.go b/pkg/api/routers/fileshare.go
deleted file mode 100644
index caf2c63fa..000000000
--- a/pkg/api/routers/fileshare.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package routers
-
-import (
- "github.com/astaxie/beego"
- "github.com/opensds/opensds/pkg/api/controllers"
- "github.com/opensds/opensds/pkg/utils/constants"
-)
-
-func init() {
-
- // add router for file storage api
- filens :=
- // Share is a part of files. At the same time multiple users can access the the same shares.
- beego.NewNamespace("/"+constants.APIVersion+"/:tenantId/file",
- beego.NSRouter("/shares", controllers.NewFileSharePortal(), "post:CreateFileShare;get:ListFileShares"),
- beego.NSRouter("/shares/:fileshareId", controllers.NewFileSharePortal(), "get:GetFileShare;put:UpdateFileShare;delete:DeleteFileShare"),
- // Snapshot is a point-in-time copy of the data that a FileShare contains.
- // Creates, shows, lists, unpdates and deletes snapshot.
- beego.NSRouter("/snapshots", controllers.NewFileShareSnapshotPortal(), "post:CreateFileShareSnapshot;get:ListFileShareSnapshots"),
- beego.NSRouter("/snapshots/:snapshotId", controllers.NewFileShareSnapshotPortal(), "get:GetFileShareSnapshot;put:UpdateFileShareSnapshot;delete:DeleteFileShareSnapshot"),
- // Access is to set acl's for fileshare
- beego.NSRouter("/acls", controllers.NewFileSharePortal(), "post:CreateFileShareAcl;get:ListFileSharesAcl"),
- beego.NSRouter("/acls/:aclId", controllers.NewFileSharePortal(), "get:GetFileShareAcl;delete:DeleteFileShareAcl"),
- )
- beego.AddNamespace(filens)
-}
diff --git a/pkg/api/routers/host.go b/pkg/api/routers/host.go
deleted file mode 100644
index 7141d8466..000000000
--- a/pkg/api/routers/host.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package routers
-
-import (
- "github.com/astaxie/beego"
- "github.com/opensds/opensds/pkg/api/controllers"
- "github.com/opensds/opensds/pkg/utils/constants"
-)
-
-func init() {
-
- // add router for host api
- filens :=
- beego.NewNamespace("/"+constants.APIVersion+"/:tenantId/host",
- beego.NSRouter("/hosts", controllers.NewHostPortal(), "post:CreateHost;get:ListHosts"),
- beego.NSRouter("/hosts/:hostId", controllers.NewHostPortal(), "get:GetHost;put:UpdateHost;delete:DeleteHost"),
- )
- beego.AddNamespace(filens)
-}
diff --git a/pkg/api/routers/metrics.go b/pkg/api/routers/metrics.go
deleted file mode 100644
index b6fbe8d9a..000000000
--- a/pkg/api/routers/metrics.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package routers
-
-import (
- "github.com/astaxie/beego"
-
- "github.com/opensds/opensds/pkg/api/controllers"
- "github.com/opensds/opensds/pkg/utils/constants"
-)
-
-func init() {
-
- // add router for metric api
- metricns :=
- beego.NewNamespace("/"+constants.APIVersion+"/:tenantId/metrics",
-
- // All operations of metrics can be used for both admin and users.
- beego.NSRouter("/", controllers.NewMetricsPortal(), "post:CollectMetrics;get:GetMetrics"),
-
- // All operations of file can be used for both admin and users.
- beego.NSRouter("/uploadconf", controllers.NewMetricsPortal(), "post:UploadConfFile"),
- beego.NSRouter("/downloadconf", controllers.NewMetricsPortal(), "get:DownloadConfFile"),
-
- beego.NSRouter("/urls", controllers.NewMetricsPortal(), "get:GetUrls"),
- )
- beego.AddNamespace(metricns)
-}
diff --git a/pkg/api/routers/router.go b/pkg/api/routers/router.go
deleted file mode 100755
index d4a79177e..000000000
--- a/pkg/api/routers/router.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements a entry into the OpenSDS northbound REST service.
-
-*/
-
-package routers
-
-import (
- "github.com/astaxie/beego"
- bctx "github.com/astaxie/beego/context"
- "github.com/opensds/opensds/pkg/api/controllers"
- "github.com/opensds/opensds/pkg/utils/constants"
-)
-
-func init() {
-
- // add router for v1beta api
- ns :=
- beego.NewNamespace("/"+constants.APIVersion,
- beego.NSCond(func(ctx *bctx.Context) bool {
- // To judge whether the scheme is legal or not.
- if ctx.Input.Scheme() != "http" && ctx.Input.Scheme() != "https" {
- return false
- }
-
- return true
- }),
-
- // List all dock services, including a list of dock object
- beego.NSRouter("/:tenantId/docks", &controllers.DockPortal{}, "get:ListDocks"),
- // Show one dock service, including endpoint, driverName and so on
- beego.NSRouter("/:tenantId/docks/:dockId", &controllers.DockPortal{}, "get:GetDock"),
-
- // Profile is a set of policies configured by admin and provided for users
- // CreateProfile, UpdateProfile and DeleteProfile are used for admin only
- // ListProfiles and GetProfile are used for both admin and users
- beego.NSRouter("/:tenantId/profiles", &controllers.ProfilePortal{}, "post:CreateProfile;get:ListProfiles"),
- beego.NSRouter("/:tenantId/profiles/:profileId", &controllers.ProfilePortal{}, "get:GetProfile;put:UpdateProfile;delete:DeleteProfile"),
-
- // All operations of customProperties are used for admin only
- beego.NSRouter("/:tenantId/profiles/:profileId/customProperties", &controllers.ProfilePortal{}, "post:AddCustomProperty;get:ListCustomProperties"),
- beego.NSRouter("/:tenantId/profiles/:profileId/customProperties/:customKey", &controllers.ProfilePortal{}, "delete:RemoveCustomProperty"),
-
- // Pool is the virtual description of backend storage, usually divided into block, file and object,
- // and every pool is atomic, which means every pool contains a specific set of features.
- // ListPools and GetPool are used for checking the status of backend pool, admin only
- beego.NSRouter("/:tenantId/pools", &controllers.PoolPortal{}, "get:ListPools"),
- beego.NSRouter("/:tenantId/pools/:poolId", &controllers.PoolPortal{}, "get:GetPool"),
- beego.NSRouter("/:tenantId/availabilityZones", &controllers.PoolPortal{}, "get:ListAvailabilityZones"),
- )
- beego.AddNamespace(ns)
-
- // add router for api version
- beego.Router("/", &controllers.VersionPortal{}, "get:ListVersions")
- beego.Router("/:apiVersion", &controllers.VersionPortal{}, "get:GetVersion")
-}
diff --git a/pkg/api/util/db.go b/pkg/api/util/db.go
deleted file mode 100644
index 45fd7249e..000000000
--- a/pkg/api/util/db.go
+++ /dev/null
@@ -1,955 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements API-related database operations.
-*/
-
-package util
-
-import (
- "errors"
- "fmt"
- "net"
- "regexp"
- "strings"
- "time"
-
- log "github.com/golang/glog"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/opensds/opensds/pkg/utils/constants"
- uuid "github.com/satori/go.uuid"
-)
-
-//function to store filesahreAcl metadata into database
-func CreateFileShareAclDBEntry(ctx *c.Context, in *model.FileShareAclSpec) (*model.FileShareAclSpec, error) {
- if in.Id == "" {
- in.Id = uuid.NewV4().String()
- }
- if in.CreatedAt == "" {
- in.CreatedAt = time.Now().Format(constants.TimeFormat)
- }
- if in.UpdatedAt == "" {
- in.UpdatedAt = time.Now().Format(constants.TimeFormat)
- }
- in.Status = model.FileShareAclAvailable
- // validate profileId
- if in.ProfileId == "" {
- errMsg := "profile id can not be empty when creating fileshare acl in db!"
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- // validate type
- if in.Type != "ip" {
- errMsg := fmt.Sprintf("invalid fileshare type: %v. Supported type is: ip", in.Type)
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- // validate accessTo
- accessto := in.AccessTo
- if in.AccessTo == "" {
- errMsg := fmt.Sprintf("accessTo is empty. Please give valid ip segment")
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- } else if strings.Contains(accessto, "/") {
- first, cidr, bool := net.ParseCIDR(accessto)
- log.Info(first, cidr)
- if bool != nil {
- errMsg := fmt.Sprintf("invalid IP segment %v", accessto)
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- } else {
- server := net.ParseIP(in.AccessTo)
- if server == nil {
- errMsg := fmt.Sprintf("%v is not a valid ip. Please give the proper ip", in.AccessTo)
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- }
- // validate accesscapability
- accessCapability := in.AccessCapability
- if len(accessCapability) == 0 {
- errMsg := fmt.Sprintf("empty fileshare accesscapability. Supported accesscapability are: {read, write}")
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- permissions := []string{"write", "read"}
- for _, value := range accessCapability {
- value = strings.ToLower(value)
- if !(utils.Contains(permissions, value)) {
- errMsg := fmt.Sprintf("invalid fileshare accesscapability: %v. Supported accesscapability are: {read, write}", value)
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- }
- // get fileshare details
- fileshare, err := db.C.GetFileShare(ctx, in.FileShareId)
- if err != nil {
- log.Error("file shareid is not valid: ", err)
- return nil, err
- }
- if fileshare.Status != model.FileShareAvailable {
- var errMsg = "only the status of file share is available, the acl can be created"
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
-
- return db.C.CreateFileShareAcl(ctx, in)
-}
-
-func DeleteFileShareAclDBEntry(ctx *c.Context, in *model.FileShareAclSpec) error {
- // If fileshare id is invalid, it would mean that fileshare acl
- // creation failed before the create method in storage driver was
- // called, and delete its db entry directly.
- validStatus := []string{model.FileShareAclAvailable, model.FileShareAclError,
- model.FileShareAclErrorDeleting}
- if !utils.Contained(in.Status, validStatus) {
- errMsg := fmt.Sprintf("only the file share acl with the status available, error, error_deleting can be deleted, the fileshare status is %s", in.Status)
- log.Error(errMsg)
- return errors.New(errMsg)
- }
-
- // If fileshare id is invalid, it would mean that file share acl creation failed before the create method
- // in storage driver was called, and delete its db entry directly.
- _, err := db.C.GetFileShare(ctx, in.FileShareId)
- if err != nil {
- if err := db.C.DeleteFileShareAcl(ctx, in.Id); err != nil {
- log.Error("failed delete fileshare acl in db:", err)
- return err
- }
- return nil
- }
-
- in.Status = model.FileShareAclDeleting
- _, err = db.C.UpdateFileShareAcl(ctx, in)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Function to store metadeta of fileshare into database
-func CreateFileShareDBEntry(ctx *c.Context, in *model.FileShareSpec) (*model.FileShareSpec, error) {
- if in.Id == "" {
- in.Id = uuid.NewV4().String()
- }
- // validate profileId
- if in.ProfileId == "" {
- errMsg := "profile id can not be empty when creating fileshare in db!"
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- // validate the size
- if in.Size <= 0 {
- errMsg := fmt.Sprintf("invalid fileshare size: %d", in.Size)
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- if in.AvailabilityZone == "" {
- log.Warning("Use default availability zone when user doesn't specify availabilityZone.")
- in.AvailabilityZone = "default"
- }
- if in.CreatedAt == "" {
- in.CreatedAt = time.Now().Format(constants.TimeFormat)
- }
- if in.UpdatedAt == "" {
- in.UpdatedAt = time.Now().Format(constants.TimeFormat)
- }
- //validate the name
- if in.Name == "" {
- errMsg := fmt.Sprintf("empty fileshare name is not allowed. Please give valid name.")
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- if len(in.Name) > 255 {
- errMsg := fmt.Sprintf("fileshare name length should not be more than 255 characters. input name length is : %d", len(in.Name))
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
-
- reg, err := regexp.Compile("^[a-zA-Z0-9_-]+$")
- if err != nil {
- errMsg := fmt.Sprintf("regex compilation for file name validation failed")
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- if reg.MatchString(in.Name) == false {
- errMsg := fmt.Sprintf("invalid fileshare name it only contain english char and number : %v", in.Name)
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
-
- in.UserId = ctx.UserId
- in.Status = model.FileShareCreating
- // Store the fileshare meadata into database.
- return db.C.CreateFileShare(ctx, in)
-}
-
-// DeleteFileShareDBEntry just modifies the state of the fileshare to be deleting in
-// the DB, the real deletion operation would be executed in another new thread.
-func DeleteFileShareDBEntry(ctx *c.Context, in *model.FileShareSpec) error {
- validStatus := []string{model.FileShareAvailable, model.FileShareError,
- model.FileShareErrorDeleting}
- if !utils.Contained(in.Status, validStatus) {
- errMsg := fmt.Sprintf("only the fileshare with the status available, error, errorDeleting, can be deleted, the fileshare status is %s", in.Status)
- log.Error(errMsg)
- return errors.New(errMsg)
- }
-
- snaps, err := db.C.ListSnapshotsByShareId(ctx, in.Id)
- if err != nil {
- return err
- }
- if len(snaps) > 0 {
- errMsg := fmt.Sprintf("file share %s can not be deleted, because it still has snapshots", in.Id)
- log.Error(errMsg)
- return errors.New(errMsg)
- }
-
- acls, err := db.C.ListFileShareAclsByShareId(ctx, in.Id)
- if err != nil {
- return err
- }
- if len(acls) > 0 {
- errMsg := fmt.Sprintf("file share %s can not be deleted, because it still has acls", in.Id)
- log.Error(errMsg)
- return errors.New(errMsg)
- }
-
- in.Status = model.FileShareDeleting
- _, err = db.C.UpdateFileShare(ctx, in)
- if err != nil {
- return err
- }
- return nil
-}
-
-// To create entry in database
-func CreateFileShareSnapshotDBEntry(ctx *c.Context, in *model.FileShareSnapshotSpec) (*model.FileShareSnapshotSpec, error) {
- fshare, err := db.C.GetFileShare(ctx, in.FileShareId)
- if err != nil {
- log.Error("get fileshare failed in create fileshare snapshot method: ", err)
- return nil, err
- }
- if fshare.Status != model.FileShareAvailable && fshare.Status != model.FileShareInUse {
- var errMsg = "only the status of fileshare is available or in-use, the snapshot can be created"
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- // validate profileId
- if in.ProfileId == "" {
- errMsg := "profile id can not be empty when creating fileshare snapshot in db!"
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
-
- // Check existence of fileshare snapshot name #931
- filesnaps, err := db.C.ListFileShareSnapshots(ctx)
- if err != nil {
- errMsg := fmt.Sprintf("get list of fileshare snapshot failed: %s", err.Error())
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- } else {
- for _, filesnap := range filesnaps {
- if filesnap.Name == in.Name {
- errMsg := fmt.Sprintf("file share snapshot name already exists")
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- }
- }
-
- if in.Id == "" {
- in.Id = uuid.NewV4().String()
- }
- if in.CreatedAt == "" {
- in.CreatedAt = time.Now().Format(constants.TimeFormat)
- }
-
- //validate the snapshot name
- if in.Name == "" {
- errMsg := fmt.Sprintf("snapshot name can not be empty. Please give valid snapshot name")
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- if strings.HasPrefix(in.Name, "snapshot") {
- errMsg := fmt.Sprintf("names starting 'snapshot' are reserved. Please choose a different snapshot name.")
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- reg, err := regexp.Compile("^[a-zA-Z0-9 _-]+$")
- if err != nil {
- errMsg := fmt.Sprintf("regex compilation for file share snapshot validation failed")
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- if reg.MatchString(in.Description) == false {
- errMsg := fmt.Sprintf("fileshare snapshot creation failed, because description has some special chars: %v. Description only support english char and number", in.Description)
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- in.Status = model.FileShareSnapCreating
- in.Metadata = fshare.Metadata
- return db.C.CreateFileShareSnapshot(ctx, in)
-}
-
-func DeleteFileShareSnapshotDBEntry(ctx *c.Context, in *model.FileShareSnapshotSpec) error {
- validStatus := []string{model.FileShareSnapAvailable, model.FileShareSnapError,
- model.FileShareSnapErrorDeleting}
- if !utils.Contained(in.Status, validStatus) {
- errMsg := fmt.Sprintf("only the fileshare snapshot with the status available, error, error_deleting can be deleted, the fileshare status is %s", in.Status)
- log.Error(errMsg)
- return errors.New(errMsg)
- }
-
- // If fileshare id is invalid, it would mean that fileshare snapshot creation failed before the create method
- // in storage driver was called, and delete its db entry directly.
- _, err := db.C.GetFileShare(ctx, in.FileShareId)
- if err != nil {
- if err := db.C.DeleteFileShareSnapshot(ctx, in.Id); err != nil {
- log.Error("when delete fileshare snapshot in db:", err)
- return err
- }
- return nil
- }
-
- in.Status = model.FileShareSnapDeleting
- _, err = db.C.UpdateFileShareSnapshot(ctx, in.Id, in)
- if err != nil {
- return err
- }
- return nil
-}
-
-func CreateVolumeDBEntry(ctx *c.Context, in *model.VolumeSpec) (*model.VolumeSpec, error) {
- if in.Id == "" {
- in.Id = uuid.NewV4().String()
- }
- if in.Size <= 0 {
- errMsg := fmt.Sprintf("invalid volume size: %d", in.Size)
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- if in.ProfileId == "" {
- errMsg := "profile id can not be empty when creating volume in db"
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- if in.Name == "" {
- errMsg := "empty volume name is not allowed. Please give valid name"
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- if len(in.Name) > 255 {
- errMsg := fmt.Sprintf("volume name length should not more than 255 characters. current length is : %d", len(in.Name))
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- reg, err := regexp.Compile("^[a-zA-Z0-9 _-]+$")
- if err != nil {
- errMsg := fmt.Sprintf("regex compilation for volume name validation failed")
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- if reg.MatchString(in.Name) == false {
- errMsg := fmt.Sprintf("invalid volume name, it should only contain english char and number : %v", in.Name)
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
-
- if in.SnapshotId != "" {
- snap, err := db.C.GetVolumeSnapshot(ctx, in.SnapshotId)
- if err != nil {
- log.Error("get snapshot failed in create volume method: ", err)
- return nil, err
- }
- if snap.Status != model.VolumeSnapAvailable {
- var errMsg = "only if the snapshot is available, the volume can be created"
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- if snap.Size > in.Size {
- var errMsg = "size of volume must be equal to or bigger than size of the snapshot"
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- }
- if in.AvailabilityZone == "" {
- log.Warning("Use default availability zone when user doesn't specify availabilityZone.")
- in.AvailabilityZone = "default"
- }
- if in.CreatedAt == "" {
- in.CreatedAt = time.Now().Format(constants.TimeFormat)
- }
-
- in.UserId = ctx.UserId
- in.Status = model.VolumeCreating
- // Store the volume data into database.
- return db.C.CreateVolume(ctx, in)
-}
-
-// DeleteVolumeDBEntry just modifies the state of the volume to be deleting in
-// the DB, the real deletion operation would be executed in another new thread.
-func DeleteVolumeDBEntry(ctx *c.Context, in *model.VolumeSpec) error {
- validStatus := []string{model.VolumeAvailable, model.VolumeError,
- model.VolumeErrorDeleting, model.VolumeErrorExtending}
- if !utils.Contained(in.Status, validStatus) {
- errMsg := fmt.Sprintf("only the volume with the status available, error, error_deleting, error_extending can be deleted, the volume status is %s", in.Status)
- log.Error(errMsg)
- return errors.New(errMsg)
- }
-
- snaps, err := db.C.ListSnapshotsByVolumeId(ctx, in.Id)
- if err != nil {
- return err
- }
- if len(snaps) > 0 {
- return fmt.Errorf("volume %s can not be deleted, because it still has snapshots", in.Id)
- }
-
- volAttachments, err := db.C.ListAttachmentsByVolumeId(ctx, in.Id)
- if err != nil {
- return err
- }
- if len(volAttachments) > 0 {
- return fmt.Errorf("volume %s can not be deleted, because it's in use", in.Id)
- }
-
- in.Status = model.VolumeDeleting
- _, err = db.C.UpdateVolume(ctx, in)
- if err != nil {
- return err
- }
- return nil
-}
-
-// ExtendVolumeDBEntry just modifies the state of the volume to be extending in
-// the DB, the real operation would be executed in another new thread, and the
-// new size would be updated in controller module.
-func ExtendVolumeDBEntry(ctx *c.Context, volID string, in *model.ExtendVolumeSpec) (*model.VolumeSpec, error) {
- volume, err := db.C.GetVolume(ctx, volID)
- if err != nil {
- log.Error("get volume failed in extend volume method: ", err)
- return nil, err
- }
-
- if volume.Status != model.VolumeAvailable {
- errMsg := "the status of the volume to be extended must be available!"
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- if in.NewSize <= volume.Size {
- errMsg := fmt.Sprintf("new size for extend must be greater than current size."+
- "(current: %d GB, extended: %d GB).", volume.Size, in.NewSize)
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
-
- volume.Status = model.VolumeExtending
- // Store the volume data into database.
- return db.C.ExtendVolume(ctx, volume)
-}
-
-// CreateVolumeSnapshotDBEntry just modifies the state of the volume snapshot
-// to be creating in the DB, the real operation would be executed in another new
-// thread.
-func CreateVolumeSnapshotDBEntry(ctx *c.Context, in *model.VolumeSnapshotSpec) (*model.VolumeSnapshotSpec, error) {
- if in.ProfileId == "" {
- errMsg := "profile id can not be empty when creating volume snapshot in db"
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
- vol, err := db.C.GetVolume(ctx, in.VolumeId)
- if err != nil {
- log.Error("get volume failed in create volume snapshot method: ", err)
- return nil, err
- }
- if vol.Status != model.VolumeAvailable && vol.Status != model.VolumeInUse {
- errMsg := "only the status of volume is available or in-use, the snapshot can be created"
- log.Error(errMsg)
- return nil, errors.New(errMsg)
- }
-
- if in.Id == "" {
- in.Id = uuid.NewV4().String()
- }
- if in.CreatedAt == "" {
- in.CreatedAt = time.Now().Format(constants.TimeFormat)
- }
-
- in.Status = model.VolumeSnapCreating
- in.Metadata = utils.MergeStringMaps(in.Metadata, vol.Metadata)
- return db.C.CreateVolumeSnapshot(ctx, in)
-}
-
-// DeleteVolumeSnapshotDBEntry just modifies the state of the volume snapshot to
-// be deleting in the DB, the real deletion operation would be executed in
-// another new thread.
-func DeleteVolumeSnapshotDBEntry(ctx *c.Context, in *model.VolumeSnapshotSpec) error {
- validStatus := []string{model.VolumeSnapAvailable, model.VolumeSnapError,
- model.VolumeSnapErrorDeleting}
- if !utils.Contained(in.Status, validStatus) {
- errMsg := fmt.Sprintf("only the volume snapshot with the status available, error, error_deleting can be deleted, the volume status is %s", in.Status)
- log.Error(errMsg)
- return errors.New(errMsg)
- }
-
- // If volume id is invalid, it would mean that volume snapshot creation failed before the create method
- // in storage driver was called, and delete its db entry directly.
- _, err := db.C.GetVolume(ctx, in.VolumeId)
- if err != nil {
- if err := db.C.DeleteVolumeSnapshot(ctx, in.Id); err != nil {
- log.Error("when delete volume snapshot in db:", err)
- return err
- }
- return nil
- }
-
- in.Status = model.VolumeSnapDeleting
- _, err = db.C.UpdateVolumeSnapshot(ctx, in.Id, in)
- if err != nil {
- return err
- }
- return nil
-}
-
-// CreateReplicationDBEntry just modifies the state of the volume replication
-// to be creating in the DB, the real deletion operation would be executed
-// in another new thread.
-func CreateReplicationDBEntry(ctx *c.Context, in *model.ReplicationSpec) (*model.ReplicationSpec, error) {
- pVol, err := db.C.GetVolume(ctx, in.PrimaryVolumeId)
- if err != nil {
- log.Error("get primary volume failed in create volume replication method: ", err)
- return nil, err
- }
- if pVol.Status != model.VolumeAvailable && pVol.Status != model.VolumeInUse {
- var errMsg = fmt.Errorf("only the status of primary volume is available or in-use, the replicaiton can be created")
- log.Error(errMsg)
- return nil, errMsg
- }
- sVol, err := db.C.GetVolume(ctx, in.SecondaryVolumeId)
- if err != nil {
- log.Error("get secondary volume failed in create volume replication method: ", err)
- return nil, err
- }
- if sVol.Status != model.VolumeAvailable && sVol.Status != model.VolumeInUse {
- var errMsg = fmt.Errorf("only the status of secondary volume is available or in-use, the replicaiton can be created")
- log.Error(errMsg)
- return nil, errMsg
- }
-
- // Check if specified volume has already been used in other replication.
- v, err := db.C.GetReplicationByVolumeId(ctx, in.PrimaryVolumeId)
- if err != nil {
- if _, ok := err.(*model.NotFoundError); !ok {
- var errMsg = fmt.Errorf("get replication by primary volume id %s failed: %v",
- in.PrimaryVolumeId, err)
- log.Error(errMsg)
- return nil, errMsg
- }
- }
-
- if v != nil {
- var errMsg = fmt.Errorf("specified primary volume(%s) has already been used in replication(%s)",
- in.PrimaryVolumeId, v.Id)
- log.Error(errMsg)
- return nil, errMsg
- }
-
- // check if specified volume has already been used in other replication.
- v, err = db.C.GetReplicationByVolumeId(ctx, in.SecondaryVolumeId)
- if err != nil {
- if _, ok := err.(*model.NotFoundError); !ok {
- var errMsg = fmt.Errorf("get replication by secondary volume id %s failed: %v",
- in.SecondaryVolumeId, err)
- log.Error(errMsg)
- return nil, errMsg
- }
- }
- if v != nil {
- var errMsg = fmt.Errorf("specified secondary volume(%s) has already been used in replication(%s)",
- in.SecondaryVolumeId, v.Id)
- log.Error(errMsg)
- return nil, errMsg
- }
-
- if in.Id == "" {
- in.Id = uuid.NewV4().String()
- }
- if in.CreatedAt == "" {
- in.CreatedAt = time.Now().Format(constants.TimeFormat)
- }
-
- in.ReplicationStatus = model.ReplicationCreating
- return db.C.CreateReplication(ctx, in)
-}
-
-// DeleteReplicationDBEntry just modifies the state of the volume replication to
-// be deleting in the DB, the real deletion operation would be executed in
-// another new thread.
-func DeleteReplicationDBEntry(ctx *c.Context, in *model.ReplicationSpec) error {
- invalidStatus := []string{model.ReplicationCreating, model.ReplicationDeleting, model.ReplicationEnabling,
- model.ReplicationDisabling, model.ReplicationFailingOver, model.ReplicationFailingBack}
-
- if utils.Contained(in.ReplicationStatus, invalidStatus) {
- errMsg := fmt.Sprintf("can't delete the replication in %s", in.ReplicationStatus)
- log.Error(errMsg)
- return errors.New(errMsg)
- }
-
- in.ReplicationStatus = model.ReplicationDeleting
- _, err := db.C.UpdateReplication(ctx, in.Id, in)
- if err != nil {
- return err
- }
- return nil
-}
-
-// EnableReplicationDBEntry just modifies the state of the volume replication to
-// be enabling in the DB, the real deletion operation would be executed in
-// another new thread.
-func EnableReplicationDBEntry(ctx *c.Context, in *model.ReplicationSpec) error {
- invalidStatus := []string{model.ReplicationCreating, model.ReplicationDeleting, model.ReplicationEnabling,
- model.ReplicationDisabling, model.ReplicationFailingOver, model.ReplicationFailingBack}
- if utils.Contained(in.ReplicationStatus, invalidStatus) {
- errMsg := fmt.Sprintf("can't enable the replication in %s", in.ReplicationStatus)
- log.Error(errMsg)
- return errors.New(errMsg)
- }
-
- in.ReplicationStatus = model.ReplicationEnabling
- _, err := db.C.UpdateReplication(ctx, in.Id, in)
- if err != nil {
- return err
- }
- return nil
-}
-
-// DisableReplicationDBEntry just modifies the state of the volume replication to
-// be disabling in the DB, the real deletion operation would be executed in
-// another new thread.
-func DisableReplicationDBEntry(ctx *c.Context, in *model.ReplicationSpec) error {
- invalidStatus := []string{model.ReplicationCreating, model.ReplicationDeleting, model.ReplicationEnabling,
- model.ReplicationDisabling, model.ReplicationFailingOver, model.ReplicationFailingBack}
- if utils.Contained(in.ReplicationStatus, invalidStatus) {
- errMsg := fmt.Sprintf("can't disable the replication in %s", in.ReplicationStatus)
- log.Error(errMsg)
- return errors.New(errMsg)
- }
-
- in.ReplicationStatus = model.ReplicationDisabling
- _, err := db.C.UpdateReplication(ctx, in.Id, in)
- if err != nil {
- return err
- }
- return nil
-}
-
-// FailoverReplicationDBEntry just modifies the state of the volume replication
-// to be failing_over or failing_back in the DB, the real deletion operation
-// would be executed in another new thread.
-func FailoverReplicationDBEntry(ctx *c.Context, in *model.ReplicationSpec, secondaryBackendId string) error {
- invalidStatus := []string{model.ReplicationCreating, model.ReplicationDeleting, model.ReplicationEnabling,
- model.ReplicationDisabling, model.ReplicationFailingOver, model.ReplicationFailingBack}
- if utils.Contained(in.ReplicationStatus, invalidStatus) {
- errMsg := fmt.Sprintf("can't fail over/back the replication in %s", in.ReplicationStatus)
- log.Error(errMsg)
- return errors.New(errMsg)
- }
-
- if secondaryBackendId == model.ReplicationDefaultBackendId {
- in.ReplicationStatus = model.ReplicationFailingOver
- } else {
- in.ReplicationStatus = model.ReplicationFailingBack
- }
- _, err := db.C.UpdateReplication(ctx, in.Id, in)
- if err != nil {
- return err
- }
- return nil
-}
-
-// CreateVolumeGroupDBEntry just modifies the state of the volume group
-// to be creating in the DB, the real deletion operation would be
-// executed in another new thread.
-func CreateVolumeGroupDBEntry(ctx *c.Context, in *model.VolumeGroupSpec) (*model.VolumeGroupSpec, error) {
- if len(in.Profiles) == 0 {
- msg := fmt.Sprintf("profiles must be provided to create volume group.")
- log.Error(msg)
- return nil, errors.New(msg)
- }
-
- if in.Id == "" {
- in.Id = uuid.NewV4().String()
- }
- if in.CreatedAt == "" {
- in.CreatedAt = time.Now().Format(constants.TimeFormat)
- }
- if in.AvailabilityZone == "" {
- log.Warning("Use default availability zone when user doesn't specify availabilityZone.")
- in.AvailabilityZone = "default"
- }
-
- in.Status = model.VolumeGroupCreating
- return db.C.CreateVolumeGroup(ctx, in)
-}
-
-// UpdateVolumeGroupDBEntry just modifies the state of the volume group
-// to be updating in the DB, the real deletion operation would be
-// executed in another new thread.
-func UpdateVolumeGroupDBEntry(ctx *c.Context, vgUpdate *model.VolumeGroupSpec) ([]string, []string, error) {
- vg, err := db.C.GetVolumeGroup(ctx, vgUpdate.Id)
- if err != nil {
- return nil, nil, err
- }
-
- var name string
- if vg.Name == vgUpdate.Name {
- name = ""
- } else {
- name = vgUpdate.Name
- }
- var description string
- if vg.Description == vgUpdate.Description {
- description = ""
- } else {
- description = vgUpdate.Description
- }
- vgUpdate.Profiles = vg.Profiles
- vgUpdate.PoolId = vg.PoolId
-
- var invalidUuids []string
- for _, uuidAdd := range vgUpdate.AddVolumes {
- for _, uuidRemove := range vgUpdate.RemoveVolumes {
- if uuidAdd == uuidRemove {
- invalidUuids = append(invalidUuids, uuidAdd)
- }
- }
- }
- if len(invalidUuids) > 0 {
- msg := fmt.Sprintf("uuid %s is in both add and remove volume list", strings.Join(invalidUuids, ","))
- log.Error(msg)
- return nil, nil, errors.New(msg)
- }
-
- volumes, err := db.C.ListVolumesByGroupId(ctx, vgUpdate.Id)
- if err != nil {
- return nil, nil, err
- }
-
- var addVolumesNew, removeVolumesNew []string
- // Validate volumes in AddVolumes and RemoveVolumes.
- if len(vgUpdate.AddVolumes) > 0 {
- if addVolumesNew, err = ValidateAddVolumes(ctx, volumes, vgUpdate.AddVolumes, vgUpdate); err != nil {
- return nil, nil, err
- }
- }
- if len(vgUpdate.RemoveVolumes) > 0 {
- if removeVolumesNew, err = ValidateRemoveVolumes(ctx, volumes, vgUpdate.RemoveVolumes, vgUpdate); err != nil {
- return nil, nil, err
- }
- }
-
- if name == "" && description == "" && len(addVolumesNew) == 0 && len(removeVolumesNew) == 0 {
- msg := fmt.Sprintf("update group %s faild, because no valid name, description, addvolumes or removevolumes were provided", vgUpdate.Id)
- log.Error(msg)
- return nil, nil, errors.New(msg)
- }
-
- vgNew := &model.VolumeGroupSpec{
- BaseModel: &model.BaseModel{
- Id: vg.Id,
- },
- }
-
- vgNew.UpdatedAt = time.Now().Format(constants.TimeFormat)
- // Only update name or description. No need to send them over through an RPC call and set status to available.
- if name != "" {
- vgNew.Name = name
- }
- if description != "" {
- vgNew.Description = description
- }
- if len(addVolumesNew) == 0 && len(removeVolumesNew) == 0 {
- vgNew.Status = model.VolumeGroupAvailable
- } else {
- vgNew.Status = model.VolumeGroupUpdating
- }
-
- _, err = db.C.UpdateVolumeGroup(ctx, vgNew)
- if err != nil {
- log.Errorf("when update volume group in db: %v", err)
- return nil, nil, err
- }
-
- return addVolumesNew, removeVolumesNew, nil
-}
-
-func ValidateAddVolumes(ctx *c.Context, volumes []*model.VolumeSpec, addVolumes []string, vg *model.VolumeGroupSpec) ([]string, error) {
- var addVolumeRef []string
- var flag bool
- for _, volumeId := range addVolumes {
- flag = true
- for _, volume := range volumes {
- if volumeId == volume.Id {
- // Volume already in group. Remove it from addVolumes.
- flag = false
- break
- }
- }
- if flag {
- addVolumeRef = append(addVolumeRef, volumeId)
- }
- }
-
- var addVolumesNew []string
- for _, addVol := range addVolumeRef {
- addVolRef, err := db.C.GetVolume(ctx, addVol)
- if err != nil {
- log.Error(fmt.Sprintf("cannot add volume %s to group %s, volume cannot be found.", addVol, vg.Id))
- return nil, err
- }
- if addVolRef.GroupId != "" {
- return nil, fmt.Errorf("cannot add volume %s to group %s because it is already in group %s", addVolRef.Id, vg.Id, addVolRef.GroupId)
- }
- if addVolRef.ProfileId == "" {
- return nil, fmt.Errorf("cannot add volume %s to group %s , volume has no profile.", addVolRef.Id, vg.Id)
- }
- if !utils.Contained(addVolRef.ProfileId, vg.Profiles) {
- return nil, fmt.Errorf("cannot add volume %s to group %s , volume profile is not supported by the group.", addVolRef.Id, vg.Id)
- }
- if addVolRef.Status != model.VolumeAvailable && addVolRef.Status != model.VolumeInUse {
- return nil, fmt.Errorf("cannot add volume %s to group %s because volume is in invalid status %s", addVolRef.Id, vg.Id, addVolRef.Status)
- }
- if addVolRef.PoolId != vg.PoolId {
- return nil, fmt.Errorf("cannot add volume %s to group %s , volume is not local to the pool of group.", addVolRef.Id, vg.Id)
- }
-
- addVolumesNew = append(addVolumesNew, addVolRef.Id)
- }
-
- return addVolumesNew, nil
-}
-
-func ValidateRemoveVolumes(ctx *c.Context, volumes []*model.VolumeSpec, removeVolumes []string, vg *model.VolumeGroupSpec) ([]string, error) {
-
- for _, v := range removeVolumes {
- for _, volume := range volumes {
- if v == volume.Id {
- if volume.Status != model.VolumeAvailable && volume.Status != model.VolumeInUse && volume.Status != model.VolumeError && volume.Status != model.VolumeErrorDeleting {
- return nil, fmt.Errorf("cannot remove volume %s from group %s, volume is in invalid status %s", volume.Id, vg.Id, volume.Status)
- }
- break
- }
-
- }
- }
- for _, v := range removeVolumes {
- var available = false
- for _, volume := range volumes {
- if v == volume.Id {
- available = true
- break
- }
- }
- if available == false {
- return nil, fmt.Errorf("cannot remove volume %s from group %s, volume is not in group ", v, vg.Id)
- }
- }
-
- return removeVolumes, nil
-}
-
-// DeleteVolumeGroupDBEntry just modifies the state of the volume group
-// to be deleting in the DB, the real deletion operation would be
-// executed in another new thread.
-func DeleteVolumeGroupDBEntry(ctx *c.Context, volumeGroupId string) error {
- vg, err := db.C.GetVolumeGroup(ctx, volumeGroupId)
- if err != nil {
- return err
- }
-
- // If pool id is invalid, it would mean that volume group creation failed before the create method
- // in storage driver was called, and delete its db entry directly.
- _, err = db.C.GetDockByPoolId(ctx, vg.PoolId)
- if err != nil {
- if err := db.C.DeleteVolumeGroup(ctx, vg.Id); err != nil {
- log.Error("when delete volume group in db:", err)
- return err
- }
- return nil
- }
-
- //TODO DeleteVolumes tag is set by policy.
- deleteVolumes := true
-
- if deleteVolumes == false && vg.Status != model.VolumeGroupAvailable && vg.Status != model.VolumeGroupError {
- msg := fmt.Sprintf("the status of the Group must be available or error , group can be deleted. But current status is %s", vg.Status)
- log.Error(msg)
- return errors.New(msg)
- }
-
- if vg.GroupSnapshots != nil {
- msg := fmt.Sprintf("group can not be deleted, because group has existing snapshots")
- log.Error(msg)
- return errors.New(msg)
- }
-
- volumes, err := db.C.ListVolumesByGroupId(ctx, vg.Id)
- if err != nil {
- return err
- }
-
- if len(volumes) > 0 && deleteVolumes == false {
- msg := fmt.Sprintf("group %s still contains volumes. The deleteVolumes flag is required to delete it.", vg.Id)
- log.Error(msg)
- return errors.New(msg)
- }
-
- var volumesUpdate []*model.VolumeSpec
- for _, value := range volumes {
- if value.AttachStatus == model.VolumeAttached {
- msg := fmt.Sprintf("volume %s in group %s is attached. Need to deach first.", value.Id, vg.Id)
- log.Error(msg)
- return errors.New(msg)
- }
-
- snapshots, err := db.C.ListSnapshotsByVolumeId(ctx, value.Id)
- if err != nil {
- return err
- }
- if len(snapshots) > 0 {
- msg := fmt.Sprintf("volume %s in group still has snapshots", value.Id)
- log.Error(msg)
- return errors.New(msg)
- }
-
- volumesUpdate = append(volumesUpdate, &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: value.Id,
- },
- Status: model.VolumeDeleting,
- GroupId: volumeGroupId,
- })
- }
-
- db.C.UpdateStatus(ctx, volumesUpdate, "")
- db.C.UpdateStatus(ctx, vg, model.VolumeGroupDeleting)
-
- return nil
-}
diff --git a/pkg/api/util/db_test.go b/pkg/api/util/db_test.go
deleted file mode 100644
index 7a095c80b..000000000
--- a/pkg/api/util/db_test.go
+++ /dev/null
@@ -1,814 +0,0 @@
-// Copyright 2018 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "testing"
-
- "github.com/opensds/opensds/pkg/utils"
-
- "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/testutils/collection"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
-)
-
-var assertTestResult = func(t *testing.T, got, expected interface{}) {
- t.Helper()
- if !reflect.DeepEqual(got, expected) {
- t.Errorf("expected %v, got %v\n", expected, got)
- }
-}
-
-func TestCreateVolumeDBEntry(t *testing.T) {
- var in = &model.VolumeSpec{
- BaseModel: &model.BaseModel{},
- Name: "volume sample",
- Description: "This is a sample volume for testing",
- Size: int64(1),
- ProfileId: "3769855c-a102-11e7-b772-17b880d2f537",
- Status: model.VolumeCreating,
- }
-
- t.Run("Everything should work well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("CreateVolume", context.NewAdminContext(), in).Return(&SampleVolumes[0], nil)
- db.C = mockClient
-
- var expected = &SampleVolumes[0]
- result, err := CreateVolumeDBEntry(context.NewAdminContext(), in)
- if err != nil {
- t.Errorf("failed to create volume asynchronously, err is %v\n", err)
- }
- assertTestResult(t, result, expected)
- })
-
- t.Run("The size of volume created should be greater than zero", func(t *testing.T) {
- in.Size = int64(-2)
- mockClient := new(dbtest.Client)
- mockClient.On("CreateVolume", context.NewAdminContext(), in).Return(&SampleVolumes[0], nil)
- db.C = mockClient
-
- _, err := CreateVolumeDBEntry(context.NewAdminContext(), in)
- expectedError := fmt.Sprintf("invalid volume size: %d", in.Size)
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("The profile id should not be empty", func(t *testing.T) {
- in.Size, in.ProfileId = int64(1), ""
- mockClient := new(dbtest.Client)
- mockClient.On("CreateVolume", context.NewAdminContext(), in).Return(&SampleVolumes[0], nil)
- db.C = mockClient
-
- _, err := CreateVolumeDBEntry(context.NewAdminContext(), in)
- expectedError := "profile id can not be empty when creating volume in db"
- assertTestResult(t, err.Error(), expectedError)
- })
-}
-
-func TestCreateVolumeFromSnapshotDBEntry(t *testing.T) {
- var in = &model.VolumeSpec{
- BaseModel: &model.BaseModel{},
- Name: "volume sample",
- Description: "This is a sample volume for testing",
- Size: int64(1),
- ProfileId: "3769855c-a102-11e7-b772-17b880d2f537",
- Status: model.VolumeCreating,
- SnapshotId: "3769855c-a102-11e7-b772-17b880d2f537",
- }
- var snap = &model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: "3769855c-a102-11e7-b772-17b880d2f537",
- },
- Size: int64(1),
- Status: model.VolumeSnapAvailable,
- }
-
- t.Run("Everything should work well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("CreateVolume", context.NewAdminContext(), in).Return(&SampleVolumes[1], nil)
- mockClient.On("GetVolumeSnapshot", context.NewAdminContext(), "3769855c-a102-11e7-b772-17b880d2f537").Return(snap, nil)
- db.C = mockClient
-
- var expected = &SampleVolumes[1]
- result, err := CreateVolumeDBEntry(context.NewAdminContext(), in)
- if err != nil {
- t.Errorf("failed to create volume with snapshot, err is %v\n", err)
- }
- assertTestResult(t, result, expected)
- })
-
- t.Run("The status of volume snapshot should always be available", func(t *testing.T) {
- snap.Status = model.VolumeSnapError
- mockClient := new(dbtest.Client)
- mockClient.On("CreateVolume", context.NewAdminContext(), in).Return(&SampleVolumes[1], nil)
- mockClient.On("GetVolumeSnapshot", context.NewAdminContext(), "3769855c-a102-11e7-b772-17b880d2f537").Return(snap, nil)
- db.C = mockClient
-
- _, err := CreateVolumeDBEntry(context.NewAdminContext(), in)
- expectedError := "only if the snapshot is available, the volume can be created"
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("Size of volume should always be equal to or bigger than size of the snapshot", func(t *testing.T) {
- snap.Status, snap.Size = model.VolumeSnapAvailable, 10
- mockClient := new(dbtest.Client)
- mockClient.On("CreateVolume", context.NewAdminContext(), in).Return(&SampleVolumes[1], nil)
- mockClient.On("GetVolumeSnapshot", context.NewAdminContext(), "3769855c-a102-11e7-b772-17b880d2f537").Return(snap, nil)
- db.C = mockClient
-
- _, err := CreateVolumeDBEntry(context.NewAdminContext(), in)
- expectedError := "size of volume must be equal to or bigger than size of the snapshot"
- assertTestResult(t, err.Error(), expectedError)
- })
-}
-
-func TestDeleteVolumeDBEntry(t *testing.T) {
- var vol = &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- },
- Status: model.VolumeAvailable,
- ProfileId: "3769855c-a102-11e7-b772-17b880d2f537",
- PoolId: "3762355c-a102-11e7-b772-17b880d2f537",
- }
- var in = &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- },
- Status: model.VolumeDeleting,
- ProfileId: "3769855c-a102-11e7-b772-17b880d2f537",
- PoolId: "3762355c-a102-11e7-b772-17b880d2f537",
- }
-
- t.Run("Everything should work well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("DeleteVolume", context.NewAdminContext(), vol.Id).Return(nil)
- mockClient.On("ListSnapshotsByVolumeId", context.NewAdminContext(), vol.Id).Return(nil, nil)
- mockClient.On("ListAttachmentsByVolumeId", context.NewAdminContext(), vol.Id).Return(nil, nil)
- mockClient.On("UpdateVolume", context.NewAdminContext(), in).Return(nil, nil)
- db.C = mockClient
-
- err := DeleteVolumeDBEntry(context.NewAdminContext(), vol)
- if err != nil {
- t.Errorf("failed to delete volume, err is %v\n", err)
- }
- })
-
- t.Run("Volume to be deleted should not contain any snapshots", func(t *testing.T) {
- var sampleSnapshots = []*model.VolumeSnapshotSpec{&SampleSnapshots[0]}
- // Considering vol has been updated inisde DeleteVolumeDBEntry, so the status
- // should be rolled back here.
- vol.Status = model.VolumeAvailable
- mockClient := new(dbtest.Client)
- mockClient.On("DeleteVolume", context.NewAdminContext(), vol.Id).Return(nil)
- mockClient.On("ListSnapshotsByVolumeId", context.NewAdminContext(), vol.Id).Return(sampleSnapshots, nil)
- mockClient.On("ListAttachmentsByVolumeId", context.NewAdminContext(), vol.Id).Return(nil, nil)
- mockClient.On("UpdateVolume", context.NewAdminContext(), in).Return(nil, nil)
- db.C = mockClient
-
- err := DeleteVolumeDBEntry(context.NewAdminContext(), vol)
- expectedError := fmt.Sprintf("volume %s can not be deleted, because it still has snapshots", in.Id)
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("Volume to be deleted should not be in-use", func(t *testing.T) {
- var sampleAttachments = []*model.VolumeAttachmentSpec{&SampleAttachments[0]}
- // Considering vol has been updated inisde DeleteVolumeDBEntry, so the status
- // should be rolled back here.
- vol.Status = model.VolumeAvailable
- mockClient := new(dbtest.Client)
- mockClient.On("DeleteVolume", context.NewAdminContext(), vol.Id).Return(nil)
- mockClient.On("ListSnapshotsByVolumeId", context.NewAdminContext(), vol.Id).Return(nil, nil)
- mockClient.On("ListAttachmentsByVolumeId", context.NewAdminContext(), vol.Id).Return(sampleAttachments, nil)
- mockClient.On("UpdateVolume", context.NewAdminContext(), in).Return(nil, nil)
- db.C = mockClient
-
- err := DeleteVolumeDBEntry(context.NewAdminContext(), vol)
- expectedError := fmt.Sprintf("volume %s can not be deleted, because it's in use", in.Id)
- assertTestResult(t, err.Error(), expectedError)
- })
-}
-
-func TestExtendVolumeDBEntry(t *testing.T) {
- var vol = &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- },
- Status: model.VolumeAvailable,
- Size: 2,
- }
- var in = &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- },
- Status: model.VolumeExtending,
- Size: 2,
- }
-
- t.Run("Everything should work well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolume", context.NewAdminContext(), "bd5b12a8-a101-11e7-941e-d77981b584d8").Return(vol, nil)
- mockClient.On("ExtendVolume", context.NewAdminContext(), in).Return(nil, nil)
- db.C = mockClient
-
- _, err := ExtendVolumeDBEntry(context.NewAdminContext(), vol.Id, &model.ExtendVolumeSpec{NewSize: 20})
- if err != nil {
- t.Errorf("failed to extend volume: %v\n", err)
- }
- })
-
- t.Run("The status of volume should always be available", func(t *testing.T) {
- vol.Status = model.VolumeCreating
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolume", context.NewAdminContext(), "bd5b12a8-a101-11e7-941e-d77981b584d8").Return(vol, nil)
- mockClient.On("ExtendVolume", context.NewAdminContext(), in).Return(nil, nil)
- db.C = mockClient
-
- _, err := ExtendVolumeDBEntry(context.NewAdminContext(), vol.Id, &model.ExtendVolumeSpec{NewSize: 20})
- expectedError := "the status of the volume to be extended must be available!"
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("The extended size should always be larger than current size", func(t *testing.T) {
- vol.Size, vol.Status = 20, model.VolumeAvailable
- in.Size = 20
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolume", context.NewAdminContext(), "bd5b12a8-a101-11e7-941e-d77981b584d8").Return(vol, nil)
- mockClient.On("ExtendVolume", context.NewAdminContext(), in).Return(nil, nil)
- db.C = mockClient
-
- _, err := ExtendVolumeDBEntry(context.NewAdminContext(), vol.Id, &model.ExtendVolumeSpec{NewSize: 2})
- expectedError := "new size for extend must be greater than current size." +
- "(current: 20 GB, extended: 2 GB)."
- assertTestResult(t, err.Error(), expectedError)
- })
-}
-
-func TestCreateVolumeSnapshotDBEntry(t *testing.T) {
- var vol = &model.VolumeSpec{
- BaseModel: &model.BaseModel{
- Id: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- },
- Size: 1,
- Status: "available",
- }
- var req = &model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{},
- VolumeId: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- Name: "sample-snapshot-01",
- Description: "This is the first sample snapshot for testing",
- Size: int64(1),
- ProfileId: "3769855c-a102-11e7-b772-17b880d2f537",
- Status: "creating",
- Metadata: map[string]string{"a": "a"},
- }
-
- t.Run("Everything should work well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolume", context.NewAdminContext(), "bd5b12a8-a101-11e7-941e-d77981b584d8").Return(vol, nil)
- mockClient.On("CreateVolumeSnapshot", context.NewAdminContext(), req).Return(&SampleSnapshots[0], nil)
- db.C = mockClient
-
- var expected = &SampleSnapshots[0]
- result, err := CreateVolumeSnapshotDBEntry(context.NewAdminContext(), req)
- if err != nil {
- t.Errorf("failed to create volume snapshot, err is %v\n", err)
- }
- assertTestResult(t, result, expected)
- })
-
- t.Run("The profile id should not be empty", func(t *testing.T) {
- req.ProfileId = ""
- mockClient := new(dbtest.Client)
- mockClient.On("GetVolume", context.NewAdminContext(), "bd5b12a8-a101-11e7-941e-d77981b584d8").Return(vol, nil)
- mockClient.On("CreateVolumeSnapshot", context.NewAdminContext(), req).Return(&SampleSnapshots[0], nil)
- db.C = mockClient
-
- _, err := CreateVolumeSnapshotDBEntry(context.NewAdminContext(), req)
- expectedError := "profile id can not be empty when creating volume snapshot in db"
- assertTestResult(t, err.Error(), expectedError)
- })
-}
-
-func TestDeleteVolumeSnapshotDBEntry(t *testing.T) {
- var req = &model.VolumeSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: "3769855c-a102-11e7-b772-17b880d2f537",
- },
- VolumeId: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- Status: "available",
- }
-
- t.Run("Everything should work well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("UpdateVolumeSnapshot", context.NewAdminContext(), "3769855c-a102-11e7-b772-17b880d2f537", req).Return(nil, nil)
- mockClient.On("GetVolume", context.NewAdminContext(), req.VolumeId).Return(nil, nil)
- db.C = mockClient
-
- err := DeleteVolumeSnapshotDBEntry(context.NewAdminContext(), req)
- if err != nil {
- t.Errorf("failed to delete volume snapshot, err is %v\n", err)
- }
- })
-}
-
-func TestCreateFileShareSnapshotDBEntry(t *testing.T) {
- var fileshare = &model.FileShareSpec{
- BaseModel: &model.BaseModel{
- Id: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- },
- Status: "available",
- }
- var req = &model.FileShareSnapshotSpec{
- BaseModel: &model.BaseModel{
- Id: "3769855c-a102-11e7-b772-17b880d2f537",
- },
- Name: "sample-snapshot-01",
- Description: "This is the first sample snapshot for testing",
- Status: "available",
- ShareSize: int64(1),
- FileShareId: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- ProfileId: "1106b972-66ef-11e7-b172-db03f3689c9c",
- }
-
- var sampleSnapshots = []*model.FileShareSnapshotSpec{&SampleShareSnapshots[0]}
- t.Run("-ve test case - snapshot name already exists", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), "bd5b12a8-a101-11e7-941e-d77981b584d8").Return(fileshare, nil)
- mockClient.On("ListFileShareSnapshots", context.NewAdminContext()).Return(sampleSnapshots, nil)
- db.C = mockClient
-
- _, err := CreateFileShareSnapshotDBEntry(context.NewAdminContext(), req)
- expectedError := "file share snapshot name already exists"
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("test +ve", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), "bd5b12a8-a101-11e7-941e-d77981b584d8").Return(fileshare, nil)
- mockClient.On("ListFileShareSnapshots", context.NewAdminContext()).Return(nil, nil)
- mockClient.On("CreateFileShareSnapshot", context.NewAdminContext(), req).Return(&SampleShareSnapshots[0], nil)
- db.C = mockClient
-
- var expected = &SampleShareSnapshots[0]
- result, err := CreateFileShareSnapshotDBEntry(context.NewAdminContext(), req)
- if err != nil {
- t.Errorf("failed to create fileshare snapshot, err is %v\n", err)
- }
- assertTestResult(t, result, expected)
- })
-
-}
-
-func TestCreateFileShareDBEntry(t *testing.T) {
- var in = &model.FileShareSpec{
- BaseModel: &model.BaseModel{},
- Name: "sample-fileshare-01",
- Description: "This is a sample fileshare for testing",
- Size: int64(1),
- ProfileId: "b3585ebe-c42c-120g-b28e-f373746a71ca",
- Status: model.FileShareCreating,
- }
-
- t.Run("Everything should work well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("CreateFileShare", context.NewAdminContext(), in).Return(&SampleFileShares[0], nil)
- db.C = mockClient
-
- var expected = &SampleFileShares[0]
- result, err := CreateFileShareDBEntry(context.NewAdminContext(), in)
- if err != nil {
- t.Errorf("failed to create fileshare err is %v\n", err)
- }
- assertTestResult(t, result, expected)
- })
-
- t.Run("The size of fileshare created should be greater than zero", func(t *testing.T) {
- in.Size = int64(-2)
- mockClient := new(dbtest.Client)
- mockClient.On("CreateFileShare", context.NewAdminContext(), in).Return(&SampleFileShares[0], nil)
- db.C = mockClient
-
- _, err := CreateFileShareDBEntry(context.NewAdminContext(), in)
- expectedError := fmt.Sprintf("invalid fileshare size: %d", in.Size)
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("The profile id should not be empty", func(t *testing.T) {
- in.ProfileId = ""
- mockClient := new(dbtest.Client)
- mockClient.On("CreateFileShare", context.NewAdminContext(), in).Return(&SampleFileShares[0], nil)
- db.C = mockClient
-
- _, err := CreateFileShareDBEntry(context.NewAdminContext(), in)
- expectedError := "profile id can not be empty when creating fileshare in db!"
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("Empty file share name is allowed", func(t *testing.T) {
- in.Size, in.Name, in.ProfileId = int64(1), "", "b3585ebe-c42c-120g-b28e-f373746a71ca"
- mockClient := new(dbtest.Client)
- mockClient.On("CreateFileShare", context.NewAdminContext(), in).Return(&SampleFileShares[0], nil)
- db.C = mockClient
-
- _, err := CreateFileShareDBEntry(context.NewAdminContext(), in)
- expectedError := "empty fileshare name is not allowed. Please give valid name."
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("File share name length equal to 0 character are not allowed", func(t *testing.T) {
- in.Name = utils.RandSeqWithAlnum(0)
- in.Size, in.ProfileId = int64(1), "b3585ebe-c42c-120g-b28e-f373746a71ca"
- mockClient := new(dbtest.Client)
- mockClient.On("CreateFileShare", context.NewAdminContext(), in).Return(&SampleFileShares[0], nil)
- db.C = mockClient
-
- _, err := CreateFileShareDBEntry(context.NewAdminContext(), in)
- expectedError := "empty fileshare name is not allowed. Please give valid name."
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("File share name length equal to 1 character are allowed", func(t *testing.T) {
- in.Name = utils.RandSeqWithAlnum(1)
- in.Size, in.ProfileId = int64(1), "b3585ebe-c42c-120g-b28e-f373746a71ca"
- mockClient := new(dbtest.Client)
- mockClient.On("CreateFileShare", context.NewAdminContext(), in).Return(&SampleFileShares[0], nil)
- db.C = mockClient
-
- var expected = &SampleFileShares[0]
- result, err := CreateFileShareDBEntry(context.NewAdminContext(), in)
- if err != nil {
- t.Errorf("failed to create fileshare err is %v\n", err)
- }
- assertTestResult(t, result, expected)
- })
-
- t.Run("File share name length equal to 10 characters are allowed", func(t *testing.T) {
- in.Name = utils.RandSeqWithAlnum(10)
- in.Size, in.ProfileId = int64(1), "b3585ebe-c42c-120g-b28e-f373746a71ca"
- mockClient := new(dbtest.Client)
- mockClient.On("CreateFileShare", context.NewAdminContext(), in).Return(&SampleFileShares[0], nil)
- db.C = mockClient
-
- var expected = &SampleFileShares[0]
- result, err := CreateFileShareDBEntry(context.NewAdminContext(), in)
- if err != nil {
- t.Errorf("failed to create fileshare err is %v\n", err)
- }
- assertTestResult(t, result, expected)
- })
-
- t.Run("File share name length equal to 254 characters are allowed", func(t *testing.T) {
- in.Name = utils.RandSeqWithAlnum(254)
- in.Size, in.ProfileId = int64(1), "b3585ebe-c42c-120g-b28e-f373746a71ca"
- mockClient := new(dbtest.Client)
- mockClient.On("CreateFileShare", context.NewAdminContext(), in).Return(&SampleFileShares[0], nil)
- db.C = mockClient
-
- var expected = &SampleFileShares[0]
- result, err := CreateFileShareDBEntry(context.NewAdminContext(), in)
- if err != nil {
- t.Errorf("failed to create fileshare err is %v\n", err)
- }
- assertTestResult(t, result, expected)
- })
-
- t.Run("File share name length equal to 255 characters are allowed", func(t *testing.T) {
- in.Name = utils.RandSeqWithAlnum(255)
- in.Size, in.ProfileId = int64(1), "b3585ebe-c42c-120g-b28e-f373746a71ca"
- mockClient := new(dbtest.Client)
- mockClient.On("CreateFileShare", context.NewAdminContext(), in).Return(&SampleFileShares[0], nil)
- db.C = mockClient
-
- var expected = &SampleFileShares[0]
- result, err := CreateFileShareDBEntry(context.NewAdminContext(), in)
- if err != nil {
- t.Errorf("failed to create fileshare err is %v\n", err)
- }
- assertTestResult(t, result, expected)
- })
-
- t.Run("File share name length more than 255 characters are not allowed", func(t *testing.T) {
- in.Name = utils.RandSeqWithAlnum(256)
- in.Size, in.ProfileId = int64(1), "b3585ebe-c42c-120g-b28e-f373746a71ca"
- mockClient := new(dbtest.Client)
- mockClient.On("CreateFileShare", context.NewAdminContext(), in).Return(&SampleFileShares[0], nil)
- db.C = mockClient
-
- _, err := CreateFileShareDBEntry(context.NewAdminContext(), in)
- expectedError := "fileshare name length should not be more than 255 characters. input name length is : " + strconv.Itoa(len(in.Name))
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("File share name length more than 255 characters are not allowed", func(t *testing.T) {
- in.Name = utils.RandSeqWithAlnum(257)
- in.Size, in.ProfileId = int64(1), "b3585ebe-c42c-120g-b28e-f373746a71ca"
- mockClient := new(dbtest.Client)
- mockClient.On("CreateFileShare", context.NewAdminContext(), in).Return(&SampleFileShares[0], nil)
- db.C = mockClient
-
- _, err := CreateFileShareDBEntry(context.NewAdminContext(), in)
- expectedError := "fileshare name length should not be more than 255 characters. input name length is : " + strconv.Itoa(len(in.Name))
- assertTestResult(t, err.Error(), expectedError)
- })
-}
-
-func TestDeleteFileShareDBEntry(t *testing.T) {
- var fileshare = &model.FileShareSpec{
- BaseModel: &model.BaseModel{
- Id: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- },
- Status: model.FileShareAvailable,
- ProfileId: "3769855c-a102-11e7-b772-17b880d2f537",
- PoolId: "3762355c-a102-11e7-b772-17b880d2f537",
- }
- var in = &model.FileShareSpec{
- BaseModel: &model.BaseModel{
- Id: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- },
- Status: model.FileShareInUse,
- ProfileId: "3769855c-a102-11e7-b772-17b880d2f537",
- PoolId: "3762355c-a102-11e7-b772-17b880d2f537",
- }
- t.Run("FileShare to be deleted should not be in-use", func(t *testing.T) {
- fileshare.Status = model.FileShareInUse
- mockClient := new(dbtest.Client)
- mockClient.On("ListSnapshotsByShareId", context.NewAdminContext(), fileshare.Id).Return(nil, nil)
- mockClient.On("ListFileShareAclsByShareId", context.NewAdminContext(), fileshare.Id).Return(nil, nil)
- mockClient.On("UpdateFileShare", context.NewAdminContext(), in).Return(nil, nil)
- mockClient.On("DeleteFileShare", context.NewAdminContext(), fileshare.Id).Return(nil)
- db.C = mockClient
-
- err := DeleteFileShareDBEntry(context.NewAdminContext(), fileshare)
- expectedError := fmt.Sprintf("only the fileshare with the status available, error, errorDeleting, can be deleted, the fileshare status is %s", in.Status)
- assertTestResult(t, err.Error(), expectedError)
- })
-
- var sampleSnapshots = []*model.FileShareSnapshotSpec{&SampleShareSnapshots[0]}
- t.Run("FileShare should not be deleted if it has dependent snapshots", func(t *testing.T) {
- //in.Status = model.FileShareAvailable
- fileshare.Status = model.FileShareAvailable
- mockClient := new(dbtest.Client)
- mockClient.On("ListSnapshotsByShareId", context.NewAdminContext(), fileshare.Id).Return(sampleSnapshots, nil)
- db.C = mockClient
-
- err := DeleteFileShareDBEntry(context.NewAdminContext(), fileshare)
- expectedError := fmt.Sprintf("file share %s can not be deleted, because it still has snapshots", in.Id)
- assertTestResult(t, err.Error(), expectedError)
- })
-
- var sampleAcls = []*model.FileShareAclSpec{&SampleFileSharesAcl[2]}
- t.Run("FileShare should not be deleted if it has dependent acls", func(t *testing.T) {
- //in.Status = model.FileShareAvailable
- fileshare.Status = model.FileShareAvailable
- mockClient := new(dbtest.Client)
- mockClient.On("ListSnapshotsByShareId", context.NewAdminContext(), fileshare.Id).Return(nil, nil)
- mockClient.On("ListFileShareAclsByShareId", context.NewAdminContext(), fileshare.Id).Return(sampleAcls, nil)
- db.C = mockClient
-
- err := DeleteFileShareDBEntry(context.NewAdminContext(), fileshare)
- expectedError := fmt.Sprintf("file share %s can not be deleted, because it still has acls", in.Id)
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("FileShare deletion when it is available", func(t *testing.T) {
- in.Status = model.FileShareDeleting
- //fileshare.Status = model.FileShareAvailable
- mockClient := new(dbtest.Client)
- mockClient.On("ListSnapshotsByShareId", context.NewAdminContext(), fileshare.Id).Return(nil, nil)
- mockClient.On("ListFileShareAclsByShareId", context.NewAdminContext(), fileshare.Id).Return(nil, nil)
- mockClient.On("UpdateFileShare", context.NewAdminContext(), in).Return(nil, nil)
- mockClient.On("DeleteFileShare", context.NewAdminContext(), fileshare.Id).Return(nil)
- db.C = mockClient
-
- err := DeleteFileShareDBEntry(context.NewAdminContext(), fileshare)
- if err != nil {
- t.Errorf("failed to delete fileshare, err is %v\n", err)
- }
- })
-}
-
-func TestDeleteFileShareAclDBEntry(t *testing.T) {
- var in = &model.FileShareAclSpec{
- BaseModel: &model.BaseModel{
- Id: "d2975ebe-d82c-430f-b28e-f373746a71ca",
- },
- Status: model.FileShareAclAvailable,
- ProfileId: "3769855c-a102-11e7-b772-17b880d2f537",
- FileShareId: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- Type: "ip",
- AccessTo: "10.21.23.10",
- AccessCapability: []string{"Read", "Write"},
- }
- var out = &model.FileShareAclSpec{
- BaseModel: &model.BaseModel{
- Id: "d2975ebe-d82c-430f-b28e-f373746a71ca",
- },
- Status: model.FileShareAclDeleting,
- ProfileId: "3769855c-a102-11e7-b772-17b880d2f537",
- FileShareId: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- Type: "ip",
- AccessTo: "10.21.23.10",
- AccessCapability: []string{"Read", "Write"},
- }
-
- t.Run("FileShareAcl to be deleted should not be in-use", func(t *testing.T) {
- in.Status = model.FileShareAclInUse
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), in.FileShareId).Return(nil, nil)
- mockClient.On("DeleteFileShareAcl", context.NewAdminContext(), in.Id).Return(nil, nil)
- mockClient.On("UpdateFileShareAcl", context.NewAdminContext(), in).Return(nil, nil)
- db.C = mockClient
-
- err := DeleteFileShareAclDBEntry(context.NewAdminContext(), in)
- expectedError := fmt.Sprintf("only the file share acl with the status available, error, error_deleting can be deleted, the fileshare status is %s", in.Status)
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("FileShareAcl deletion when everything works fine", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- in.Status = model.FileShareAclAvailable
- mockClient.On("GetFileShare", context.NewAdminContext(), in.FileShareId).Return(&SampleFileShares[0], nil)
- mockClient.On("DeleteFileShareAcl", context.NewAdminContext(), in.Id).Return(nil, nil)
- mockClient.On("UpdateFileShareAcl", context.NewAdminContext(), in).Return(out, nil)
- db.C = mockClient
-
- err := DeleteFileShareAclDBEntry(context.NewAdminContext(), in)
- if err != nil {
- t.Errorf("failed delete fileshare acl in db:%v\n", err)
- }
- })
-}
-
-func TestCreateFileShareAclDBEntry(t *testing.T) {
- var in = &model.FileShareAclSpec{
- BaseModel: &model.BaseModel{
- Id: "6ad25d59-a160-45b2-8920-211be282e2df",
- },
- Description: "This is a sample Acl for testing",
- ProfileId: "1106b972-66ef-11e7-b172-db03f3689c9c",
- Type: "ip",
- AccessCapability: []string{"Read", "Write"},
- AccessTo: "10.32.109.15",
- FileShareId: "d2975ebe-d82c-430f-b28e-f373746a71ca",
- }
-
- t.Run("Everything should work well", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), in.FileShareId).Return(&SampleFileShares[0], nil)
- mockClient.On("CreateFileShareAcl", context.NewAdminContext(), in).Return(&SampleFileSharesAcl[2], nil)
- db.C = mockClient
-
- var expected = &SampleFileSharesAcl[2]
- result, err := CreateFileShareAclDBEntry(context.NewAdminContext(), in)
- if err != nil {
- t.Errorf("failed to create fileshare err is %v\n", err)
- }
- assertTestResult(t, result, expected)
- })
-
- t.Run("If profile id is empty", func(t *testing.T) {
- in.ProfileId = ""
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), in.FileShareId).Return(&SampleFileShares[0], nil)
- mockClient.On("CreateFileShareAcl", context.NewAdminContext(), in).Return(&SampleFileSharesAcl[2], nil)
- db.C = mockClient
-
- _, err := CreateFileShareAclDBEntry(context.NewAdminContext(), in)
- expectedError := "profile id can not be empty when creating fileshare acl in db!"
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("Invalid Access Type", func(t *testing.T) {
- in.ProfileId, in.Type = "d2975ebe-d82c-430f-b28e-f373746a71ca", "system"
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), in.FileShareId).Return(&SampleFileShares[0], nil)
- mockClient.On("CreateFileShareAcl", context.NewAdminContext(), in).Return(&SampleFileSharesAcl[2], nil)
- db.C = mockClient
-
- _, err := CreateFileShareAclDBEntry(context.NewAdminContext(), in)
- expectedError := fmt.Sprintf("invalid fileshare type: %v. Supported type is: ip", in.Type)
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("Empty Access To", func(t *testing.T) {
- in.ProfileId, in.Type, in.AccessTo = "d2975ebe-d82c-430f-b28e-f373746a71ca", "ip", ""
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), in.FileShareId).Return(&SampleFileShares[0], nil)
- mockClient.On("CreateFileShareAcl", context.NewAdminContext(), in).Return(&SampleFileSharesAcl[2], nil)
- db.C = mockClient
-
- _, err := CreateFileShareAclDBEntry(context.NewAdminContext(), in)
- expectedError := "accessTo is empty. Please give valid ip segment"
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("Invalid Ip Segment", func(t *testing.T) {
- in.ProfileId, in.Type, in.AccessTo = "d2975ebe-d82c-430f-b28e-f373746a71ca", "ip", "201.100.101.8/9.9"
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), in.FileShareId).Return(&SampleFileShares[0], nil)
- mockClient.On("CreateFileShareAcl", context.NewAdminContext(), in).Return(&SampleFileSharesAcl[2], nil)
- db.C = mockClient
-
- _, err := CreateFileShareAclDBEntry(context.NewAdminContext(), in)
- expectedError := fmt.Sprintf("invalid IP segment %v", in.AccessTo)
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("Invalid Ip", func(t *testing.T) {
- in.ProfileId, in.Type, in.AccessTo = "d2975ebe-d82c-430f-b28e-f373746a71ca", "ip", "201.100.101"
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), in.FileShareId).Return(&SampleFileShares[0], nil)
- mockClient.On("CreateFileShareAcl", context.NewAdminContext(), in).Return(&SampleFileSharesAcl[2], nil)
- db.C = mockClient
-
- _, err := CreateFileShareAclDBEntry(context.NewAdminContext(), in)
- expectedError := fmt.Sprintf("%v is not a valid ip. Please give the proper ip", in.AccessTo)
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("Empty accesscapability", func(t *testing.T) {
- in.ProfileId, in.Type, in.AccessTo, in.AccessCapability = "d2975ebe-d82c-430f-b28e-f373746a71ca", "ip", "201.100.101.9", []string{}
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), in.FileShareId).Return(&SampleFileShares[0], nil)
- mockClient.On("CreateFileShareAcl", context.NewAdminContext(), in).Return(&SampleFileSharesAcl[2], nil)
- db.C = mockClient
-
- _, err := CreateFileShareAclDBEntry(context.NewAdminContext(), in)
- expectedError := fmt.Sprintf("empty fileshare accesscapability. Supported accesscapability are: {read, write}")
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("Invalid accesscapabilities", func(t *testing.T) {
- in.ProfileId, in.Type, in.AccessTo, in.AccessCapability = "d2975ebe-d82c-430f-b28e-f373746a71ca", "ip", "201.100.101.9", []string{"read", "execute"}
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), in.FileShareId).Return(&SampleFileShares[0], nil)
- mockClient.On("CreateFileShareAcl", context.NewAdminContext(), in).Return(&SampleFileSharesAcl[2], nil)
- db.C = mockClient
-
- value := "execute"
- _, err := CreateFileShareAclDBEntry(context.NewAdminContext(), in)
- expectedError := fmt.Sprintf("invalid fileshare accesscapability: %v. Supported accesscapability are: {read, write}", value)
- assertTestResult(t, err.Error(), expectedError)
- })
-
- t.Run("Invalid fileshare id given", func(t *testing.T) {
- in.ProfileId, in.Type, in.AccessTo, in.AccessCapability = "d2975ebe-d82c-430f-b28e-f373746a71ca", "ip", "201.100.101.9", []string{"read"}
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), in.FileShareId).Return(&SampleFileShares[0], nil)
- SampleFileShares[0].Status = model.FileShareError
- mockClient.On("CreateFileShareAcl", context.NewAdminContext(), in).Return(&SampleFileSharesAcl[2], nil)
- db.C = mockClient
-
- _, err := CreateFileShareAclDBEntry(context.NewAdminContext(), in)
- expectedError := "only the status of file share is available, the acl can be created"
- assertTestResult(t, err.Error(), expectedError)
- })
-}
-
-func TestDeleteFileShareSnapshotDBEntry(t *testing.T) {
- var in = &SampleFileShareSnapshots[0]
-
- t.Run("When everything works fine", func(t *testing.T) {
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), in.FileShareId).Return(&SampleFileShares[0], nil)
- mockClient.On("DeleteFileShareSnapshot", context.NewAdminContext(), in.Id).Return(nil, nil)
- mockClient.On("UpdateFileShareSnapshot", context.NewAdminContext(), in.Id, in).Return(nil, nil)
- db.C = mockClient
-
- err := DeleteFileShareSnapshotDBEntry(context.NewAdminContext(), in)
- if err != nil {
- t.Errorf("failed to delete fileshare snapshot, err is %v\n", err)
- }
- })
-
- t.Run("File status not available", func(t *testing.T) {
- in.Status = model.FileShareAclInUse
- mockClient := new(dbtest.Client)
- mockClient.On("GetFileShare", context.NewAdminContext(), in.FileShareId).Return(nil, nil)
- mockClient.On("DeleteFileShareSnapshot", context.NewAdminContext(), in.Id).Return(nil, nil)
- mockClient.On("UpdateFileShareSnapshot", context.NewAdminContext(), in.Id, in).Return(nil, nil)
- db.C = mockClient
-
- err := DeleteFileShareSnapshotDBEntry(context.NewAdminContext(), in)
- expectedError := fmt.Sprintf("only the fileshare snapshot with the status available, error, error_deleting can be deleted, the fileshare status is %s", in.Status)
- assertTestResult(t, err.Error(), expectedError)
- })
-}
diff --git a/pkg/context/context.go b/pkg/context/context.go
index ef5f9ac95..6f7c675ec 100644
--- a/pkg/context/context.go
+++ b/pkg/context/context.go
@@ -24,7 +24,7 @@ import (
"github.com/astaxie/beego/context"
log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
func NewAdminContext() *Context {
diff --git a/pkg/controller/client/client.go b/pkg/controller/client/client.go
index 8fdd70fdb..30c524da7 100644
--- a/pkg/controller/client/client.go
+++ b/pkg/controller/client/client.go
@@ -18,7 +18,7 @@ import (
"time"
log "github.com/golang/glog"
- pb "github.com/opensds/opensds/pkg/model/proto"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
index 0754781ab..b43e4158c 100755
--- a/pkg/controller/controller.go
+++ b/pkg/controller/controller.go
@@ -25,17 +25,17 @@ import (
"fmt"
log "github.com/golang/glog"
- osdsCtx "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/controller/dr"
- "github.com/opensds/opensds/pkg/controller/fileshare"
- "github.com/opensds/opensds/pkg/controller/metrics"
- "github.com/opensds/opensds/pkg/controller/policy"
- "github.com/opensds/opensds/pkg/controller/selector"
- "github.com/opensds/opensds/pkg/controller/volume"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils"
+ osdsCtx "github.com/sodafoundation/controller/pkg/context"
+ "github.com/sodafoundation/controller/pkg/controller/dr"
+ "github.com/sodafoundation/controller/pkg/controller/fileshare"
+ "github.com/sodafoundation/controller/pkg/controller/metrics"
+ "github.com/sodafoundation/controller/pkg/controller/policy"
+ "github.com/sodafoundation/controller/pkg/controller/selector"
+ "github.com/sodafoundation/controller/pkg/controller/volume"
+ "github.com/sodafoundation/controller/pkg/db"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
+ "github.com/sodafoundation/controller/pkg/utils"
)
const (
diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go
index 9be5013d4..de08bc6d4 100755
--- a/pkg/controller/controller_test.go
+++ b/pkg/controller/controller_test.go
@@ -20,15 +20,15 @@ import (
"fmt"
"testing"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/controller/dr"
- "github.com/opensds/opensds/pkg/controller/fileshare"
- "github.com/opensds/opensds/pkg/controller/volume"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- . "github.com/opensds/opensds/testutils/collection"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
+ c "github.com/sodafoundation/controller/pkg/context"
+ "github.com/sodafoundation/controller/pkg/controller/dr"
+ "github.com/sodafoundation/controller/pkg/controller/fileshare"
+ "github.com/sodafoundation/controller/pkg/controller/volume"
+ "github.com/sodafoundation/controller/pkg/db"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
+ . "github.com/sodafoundation/controller/testutils/collection"
+ dbtest "github.com/sodafoundation/controller/testutils/db/testing"
)
type fakeSelector struct {
diff --git a/pkg/dock/client/client.go b/pkg/controller/dockcontroller/client/client.go
similarity index 97%
rename from pkg/dock/client/client.go
rename to pkg/controller/dockcontroller/client/client.go
index d93229c02..d27b9b358 100755
--- a/pkg/dock/client/client.go
+++ b/pkg/controller/dockcontroller/client/client.go
@@ -18,7 +18,7 @@ import (
"time"
log "github.com/golang/glog"
- pb "github.com/opensds/opensds/pkg/model/proto"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/keepalive"
diff --git a/pkg/controller/dr/drcontroller.go b/pkg/controller/dr/drcontroller.go
index 06fa5488b..8c01a48f7 100644
--- a/pkg/controller/dr/drcontroller.go
+++ b/pkg/controller/dr/drcontroller.go
@@ -20,13 +20,13 @@ import (
"strings"
log "github.com/golang/glog"
- "github.com/opensds/opensds/contrib/drivers/utils/config"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/controller/volume"
- "github.com/opensds/opensds/pkg/db"
- . "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "github.com/opensds/opensds/pkg/utils"
+ "github.com/sodafoundation/controller/pkg/driverconfig"
+ c "github.com/sodafoundation/controller/pkg/context"
+ "github.com/sodafoundation/controller/pkg/controller/volume"
+ "github.com/sodafoundation/controller/pkg/db"
+ . "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
+ "github.com/sodafoundation/controller/pkg/utils"
uuid "github.com/satori/go.uuid"
)
diff --git a/pkg/controller/dr/drcontroller_test.go b/pkg/controller/dr/drcontroller_test.go
index f96bd9c9f..26d98534b 100644
--- a/pkg/controller/dr/drcontroller_test.go
+++ b/pkg/controller/dr/drcontroller_test.go
@@ -18,13 +18,13 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/controller/volume"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- . "github.com/opensds/opensds/testutils/collection"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
+ "github.com/sodafoundation/controller/pkg/context"
+ "github.com/sodafoundation/controller/pkg/controller/volume"
+ "github.com/sodafoundation/controller/pkg/db"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
+ . "github.com/sodafoundation/controller/testutils/collection"
+ dbtest "github.com/sodafoundation/controller/testutils/db/testing"
"github.com/stretchr/testify/mock"
)
diff --git a/pkg/controller/fileshare/filesharecontroller.go b/pkg/controller/fileshare/filesharecontroller.go
index 679c20f27..bb5a4d6fd 100644
--- a/pkg/controller/fileshare/filesharecontroller.go
+++ b/pkg/controller/fileshare/filesharecontroller.go
@@ -25,9 +25,9 @@ import (
"fmt"
log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/dock/client"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
+ "github.com/sodafoundation/controller/pkg/controller/dockcontroller/client"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
)
// Controller is an interface for exposing some operations of different file share
diff --git a/pkg/controller/fileshare/filesharecontroller_test.go b/pkg/controller/fileshare/filesharecontroller_test.go
index abe688a68..f82d7ce72 100644
--- a/pkg/controller/fileshare/filesharecontroller_test.go
+++ b/pkg/controller/fileshare/filesharecontroller_test.go
@@ -19,10 +19,10 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/dock/client"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- . "github.com/opensds/opensds/testutils/collection"
+ "github.com/sodafoundation/controller/pkg/controller/dockcontroller/client"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
+ . "github.com/sodafoundation/controller/testutils/collection"
"google.golang.org/grpc"
)
diff --git a/pkg/controller/grpcserver.go b/pkg/controller/grpcserver.go
index 7ec120ff9..75b0d1cf8 100644
--- a/pkg/controller/grpcserver.go
+++ b/pkg/controller/grpcserver.go
@@ -21,7 +21,7 @@ import (
"runtime"
log "github.com/golang/glog"
- pb "github.com/opensds/opensds/pkg/model/proto"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
"google.golang.org/grpc"
)
diff --git a/pkg/controller/metrics/adapters/metrics_dispatcher.go b/pkg/controller/metrics/adapters/metrics_dispatcher.go
index ceae24d30..9bb905ae7 100644
--- a/pkg/controller/metrics/adapters/metrics_dispatcher.go
+++ b/pkg/controller/metrics/adapters/metrics_dispatcher.go
@@ -15,7 +15,7 @@ package adapters
import (
log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
// A buffered channel that we can send work requests on.
diff --git a/pkg/controller/metrics/adapters/metrics_dispatcher_test.go b/pkg/controller/metrics/adapters/metrics_dispatcher_test.go
index ac700e4d6..3e80185c9 100644
--- a/pkg/controller/metrics/adapters/metrics_dispatcher_test.go
+++ b/pkg/controller/metrics/adapters/metrics_dispatcher_test.go
@@ -16,7 +16,7 @@ package adapters
import (
"testing"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
func TestSendMetricToRegisteredSenders(t *testing.T) {
diff --git a/pkg/controller/metrics/adapters/metrics_sender_intf.go b/pkg/controller/metrics/adapters/metrics_sender_intf.go
index e527e60b2..70ca08fa8 100644
--- a/pkg/controller/metrics/adapters/metrics_sender_intf.go
+++ b/pkg/controller/metrics/adapters/metrics_sender_intf.go
@@ -13,7 +13,7 @@
// limitations under the License.
package adapters
-import "github.com/opensds/opensds/pkg/model"
+import "github.com/sodafoundation/controller/pkg/model"
type MetricsSenderIntf interface {
GetMetricsSender() MetricsSenderIntf
diff --git a/pkg/controller/metrics/adapters/metrics_sender_to_kafka.go b/pkg/controller/metrics/adapters/metrics_sender_to_kafka.go
index 715735e2f..a99a8e6fc 100644
--- a/pkg/controller/metrics/adapters/metrics_sender_to_kafka.go
+++ b/pkg/controller/metrics/adapters/metrics_sender_to_kafka.go
@@ -18,8 +18,8 @@ import (
"encoding/json"
log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/pkg/utils/config"
+ "github.com/sodafoundation/controller/pkg/model"
+ . "github.com/sodafoundation/controller/pkg/utils/config"
"github.com/segmentio/kafka-go"
)
diff --git a/pkg/controller/metrics/adapters/metrics_sender_to_kafka_test.go b/pkg/controller/metrics/adapters/metrics_sender_to_kafka_test.go
index b711f650c..c8a13c6f0 100644
--- a/pkg/controller/metrics/adapters/metrics_sender_to_kafka_test.go
+++ b/pkg/controller/metrics/adapters/metrics_sender_to_kafka_test.go
@@ -16,7 +16,7 @@ package adapters
import (
"testing"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
func TestKafkaMetricsSender_GetMetricsSender(t *testing.T) {
diff --git a/pkg/controller/metrics/adapters/metrics_sender_to_prometheus.go b/pkg/controller/metrics/adapters/metrics_sender_to_prometheus.go
index 32b69b92e..74f047b18 100644
--- a/pkg/controller/metrics/adapters/metrics_sender_to_prometheus.go
+++ b/pkg/controller/metrics/adapters/metrics_sender_to_prometheus.go
@@ -19,8 +19,8 @@ import (
"time"
log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/pkg/utils/config"
+ "github.com/sodafoundation/controller/pkg/model"
+ . "github.com/sodafoundation/controller/pkg/utils/config"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/push"
)
diff --git a/pkg/controller/metrics/adapters/metrics_sender_to_prometheus_test.go b/pkg/controller/metrics/adapters/metrics_sender_to_prometheus_test.go
index 0d40aa974..aaaa51adf 100644
--- a/pkg/controller/metrics/adapters/metrics_sender_to_prometheus_test.go
+++ b/pkg/controller/metrics/adapters/metrics_sender_to_prometheus_test.go
@@ -16,7 +16,7 @@ package adapters
import (
"testing"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
var SamplemetricsSpec = []*model.MetricSpec{
diff --git a/pkg/controller/metrics/metrics_controller.go b/pkg/controller/metrics/metrics_controller.go
index 8379b5ae0..1007491bc 100755
--- a/pkg/controller/metrics/metrics_controller.go
+++ b/pkg/controller/metrics/metrics_controller.go
@@ -31,10 +31,10 @@ import (
"strconv"
log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/controller/metrics/adapters"
- "github.com/opensds/opensds/pkg/dock/client"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
+ "github.com/sodafoundation/controller/pkg/controller/metrics/adapters"
+ "github.com/sodafoundation/controller/pkg/controller/dockcontroller/client"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
)
// Controller is an interface for exposing some operations of metric controllers.
diff --git a/pkg/controller/policy/executor/executor.go b/pkg/controller/policy/executor/executor.go
index 1d3cd23e5..0e1a7ac32 100755
--- a/pkg/controller/policy/executor/executor.go
+++ b/pkg/controller/policy/executor/executor.go
@@ -24,8 +24,8 @@ import (
"errors"
log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
)
// AsynchronizedExecutor
diff --git a/pkg/controller/policy/executor/executordeletesnapshot.go b/pkg/controller/policy/executor/executordeletesnapshot.go
index aea42af12..f868415f2 100755
--- a/pkg/controller/policy/executor/executordeletesnapshot.go
+++ b/pkg/controller/policy/executor/executordeletesnapshot.go
@@ -26,11 +26,11 @@ import (
"time"
log "github.com/golang/glog"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/dock/client"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
+ c "github.com/sodafoundation/controller/pkg/context"
+ "github.com/sodafoundation/controller/pkg/db"
+ "github.com/sodafoundation/controller/pkg/controller/dockcontroller/client"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
)
const (
diff --git a/pkg/controller/policy/executor/executorintervalsnapshot.go b/pkg/controller/policy/executor/executorintervalsnapshot.go
index 0ed90aebb..501b8edf9 100755
--- a/pkg/controller/policy/executor/executorintervalsnapshot.go
+++ b/pkg/controller/policy/executor/executorintervalsnapshot.go
@@ -29,9 +29,9 @@ import (
"time"
log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/dock/client"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
+ "github.com/sodafoundation/controller/pkg/controller/dockcontroller/client"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
)
type IntervalSnapshotExecutor struct {
diff --git a/pkg/controller/policy/policycontroller.go b/pkg/controller/policy/policycontroller.go
index bcfd00a86..2db0d4a46 100755
--- a/pkg/controller/policy/policycontroller.go
+++ b/pkg/controller/policy/policycontroller.go
@@ -21,8 +21,8 @@ profiles configured by admin.
package policy
import (
- "github.com/opensds/opensds/pkg/controller/policy/executor"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/controller/policy/executor"
+ "github.com/sodafoundation/controller/pkg/model"
)
// Controller is an interface for exposing some operations of different policy
diff --git a/pkg/controller/policy/storagetag.go b/pkg/controller/policy/storagetag.go
index 9d144ecfd..99a660c6d 100755
--- a/pkg/controller/policy/storagetag.go
+++ b/pkg/controller/policy/storagetag.go
@@ -26,7 +26,7 @@ import (
"fmt"
log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/utils"
+ "github.com/sodafoundation/controller/pkg/utils"
)
const (
diff --git a/pkg/controller/selector/filter.go b/pkg/controller/selector/filter.go
index bf59f2ad3..89f473aa4 100644
--- a/pkg/controller/selector/filter.go
+++ b/pkg/controller/selector/filter.go
@@ -21,8 +21,8 @@ import (
"strings"
log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils"
)
// simplifyPoolCapabilityMap ...
diff --git a/pkg/controller/selector/filter_test.go b/pkg/controller/selector/filter_test.go
index 4b9cf8e82..5c99b93e1 100644
--- a/pkg/controller/selector/filter_test.go
+++ b/pkg/controller/selector/filter_test.go
@@ -24,9 +24,9 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils"
- . "github.com/opensds/opensds/testutils/collection"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils"
+ . "github.com/sodafoundation/controller/testutils/collection"
)
type FilterCaseSpec struct {
diff --git a/pkg/controller/selector/selector.go b/pkg/controller/selector/selector.go
index 590fea5b5..15c0a87ef 100755
--- a/pkg/controller/selector/selector.go
+++ b/pkg/controller/selector/selector.go
@@ -25,9 +25,9 @@ import (
"strconv"
log "github.com/golang/glog"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
+ c "github.com/sodafoundation/controller/pkg/context"
+ "github.com/sodafoundation/controller/pkg/db"
+ "github.com/sodafoundation/controller/pkg/model"
)
// Selector is an interface that exposes some operation of different selectors.
diff --git a/pkg/controller/selector/selector_test.go b/pkg/controller/selector/selector_test.go
index c3a3a36a7..06f04fc91 100644
--- a/pkg/controller/selector/selector_test.go
+++ b/pkg/controller/selector/selector_test.go
@@ -19,10 +19,10 @@ import (
"reflect"
"testing"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
+ c "github.com/sodafoundation/controller/pkg/context"
+ "github.com/sodafoundation/controller/pkg/db"
+ "github.com/sodafoundation/controller/pkg/model"
+ dbtest "github.com/sodafoundation/controller/testutils/db/testing"
)
func TestSelectSupportedPoolForVolume(t *testing.T) {
diff --git a/pkg/controller/volume/volumecontroller.go b/pkg/controller/volume/volumecontroller.go
index 1ac485490..4baec8d13 100755
--- a/pkg/controller/volume/volumecontroller.go
+++ b/pkg/controller/volume/volumecontroller.go
@@ -26,9 +26,9 @@ import (
"fmt"
log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/dock/client"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
+ "github.com/sodafoundation/controller/pkg/controller/dockcontroller/client"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
)
// Controller is an interface for exposing some operations of different volume
diff --git a/pkg/controller/volume/volumecontroller_test.go b/pkg/controller/volume/volumecontroller_test.go
index a9bbf7953..b873a53e0 100755
--- a/pkg/controller/volume/volumecontroller_test.go
+++ b/pkg/controller/volume/volumecontroller_test.go
@@ -19,10 +19,10 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/dock/client"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- . "github.com/opensds/opensds/testutils/collection"
+ "github.com/sodafoundation/controller/pkg/controller/dockcontroller/client"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
+ . "github.com/sodafoundation/controller/testutils/collection"
"google.golang.org/grpc"
)
diff --git a/pkg/db/db.go b/pkg/db/db.go
index e40b8ad8b..fc05ab737 100755
--- a/pkg/db/db.go
+++ b/pkg/db/db.go
@@ -23,11 +23,11 @@ package db
import (
"fmt"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db/drivers/etcd"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/pkg/utils/config"
- fakedb "github.com/opensds/opensds/testutils/db"
+ c "github.com/sodafoundation/controller/pkg/context"
+ "github.com/sodafoundation/controller/pkg/db/drivers/etcd"
+ "github.com/sodafoundation/controller/pkg/model"
+ . "github.com/sodafoundation/controller/pkg/utils/config"
+ fakedb "github.com/sodafoundation/controller/testutils/db"
)
// C is a global variable that controls database module.
diff --git a/pkg/db/drivers/etcd/client.go b/pkg/db/drivers/etcd/client.go
index 587189fac..05dbefe10 100755
--- a/pkg/db/drivers/etcd/client.go
+++ b/pkg/db/drivers/etcd/client.go
@@ -21,12 +21,12 @@ import (
"time"
"github.com/coreos/etcd/pkg/transport"
- "github.com/opensds/opensds/pkg/utils/config"
- "github.com/opensds/opensds/pkg/utils/pwd"
+ "github.com/sodafoundation/controller/pkg/utils/config"
+ "github.com/sodafoundation/controller/pkg/utils/pwd"
"github.com/coreos/etcd/clientv3"
log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/utils"
+ "github.com/sodafoundation/controller/pkg/utils"
)
var (
diff --git a/pkg/db/drivers/etcd/etcd.go b/pkg/db/drivers/etcd/etcd.go
index 259319367..c7ca4e85a 100644
--- a/pkg/db/drivers/etcd/etcd.go
+++ b/pkg/db/drivers/etcd/etcd.go
@@ -30,14 +30,14 @@ import (
"strings"
"time"
- "github.com/opensds/opensds/pkg/utils/config"
+ "github.com/sodafoundation/controller/pkg/utils/config"
log "github.com/golang/glog"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils"
- "github.com/opensds/opensds/pkg/utils/constants"
- "github.com/opensds/opensds/pkg/utils/urls"
+ c "github.com/sodafoundation/controller/pkg/context"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils"
+ "github.com/sodafoundation/controller/pkg/utils/constants"
+ "github.com/sodafoundation/controller/pkg/utils/urls"
uuid "github.com/satori/go.uuid"
)
diff --git a/pkg/db/drivers/etcd/etcd_test.go b/pkg/db/drivers/etcd/etcd_test.go
index ebbbaf1c5..138496485 100644
--- a/pkg/db/drivers/etcd/etcd_test.go
+++ b/pkg/db/drivers/etcd/etcd_test.go
@@ -25,9 +25,9 @@ import (
"strings"
"testing"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/testutils/collection"
+ c "github.com/sodafoundation/controller/pkg/context"
+ "github.com/sodafoundation/controller/pkg/model"
+ . "github.com/sodafoundation/controller/testutils/collection"
)
type fakeClientCaller struct{}
diff --git a/pkg/dock/discovery/discovery.go b/pkg/dock/discovery/discovery.go
deleted file mode 100755
index de22090c5..000000000
--- a/pkg/dock/discovery/discovery.go
+++ /dev/null
@@ -1,375 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements the entry into operations of storageDock module.
-
-*/
-
-package discovery
-
-import (
- "fmt"
- "os"
- "runtime"
- "strings"
- "time"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/contrib/connector"
- "github.com/opensds/opensds/contrib/drivers"
- fd "github.com/opensds/opensds/contrib/drivers/filesharedrivers"
- "github.com/opensds/opensds/contrib/drivers/utils/config"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils"
- . "github.com/opensds/opensds/pkg/utils/config"
- uuid "github.com/satori/go.uuid"
-)
-
-const (
- availableStatus = "available"
- unavailableStatus = "unavailable"
-)
-
-type Context struct {
- StopChan chan bool
- ErrChan chan error
- MetaChan chan string
-}
-
-func DiscoveryAndReport(dd DockDiscoverer, ctx *Context) {
- for {
- select {
- case <-ctx.StopChan:
- return
- default:
- if err := dd.Discover(); err != nil {
- ctx.ErrChan <- err
- }
-
- if err := dd.Report(); err != nil {
- ctx.ErrChan <- err
- }
- }
-
- time.Sleep(60 * time.Second)
- }
-}
-
-type DockDiscoverer interface {
- Init() error
-
- Discover() error
-
- Report() error
-}
-
-// NewDockDiscoverer method creates a new DockDiscoverer.
-func NewDockDiscoverer(dockType string) DockDiscoverer {
- switch dockType {
- case model.DockTypeProvioner:
- return &provisionDockDiscoverer{
- DockRegister: NewDockRegister(),
- }
- case model.DockTypeAttacher:
- return &attachDockDiscoverer{
- DockRegister: NewDockRegister(),
- }
- }
- return nil
-}
-
-// provisionDockDiscoverer is a struct for exposing some operations of provision
-// dock service discovery.
-type provisionDockDiscoverer struct {
- *DockRegister
-
- dcks []*model.DockSpec
- pols []*model.StoragePoolSpec
-}
-
-func (pdd *provisionDockDiscoverer) Init() error {
- // Load resource from specified file
- bm := GetBackendsMap()
- host, err := os.Hostname()
- if err != nil {
- log.Error("When get os hostname:", err)
- return err
- }
-
- for _, v := range CONF.EnabledBackends {
- b := bm[v]
- if b.Name == "" {
- continue
- }
-
- dck := &model.DockSpec{
- BaseModel: &model.BaseModel{
- Id: uuid.NewV5(uuid.NamespaceOID, host+":"+b.DriverName).String(),
- },
- Name: b.Name,
- Description: b.Description,
- DriverName: b.DriverName,
- Endpoint: CONF.OsdsDock.ApiEndpoint,
- NodeId: host,
- Type: model.DockTypeProvioner,
- Metadata: map[string]string{"HostReplicationDriver": CONF.OsdsDock.HostBasedReplicationDriver},
- }
- // Update the id if the dock is already in etcd
- name := map[string][]string{
- "Name": {dck.Name},
- }
- docks, err := pdd.DockRegister.c.ListDocksWithFilter(c.NewAdminContext(), name)
- if err == nil && len(docks) != 0 {
- dck.Id = docks[0].Id
- }
- pdd.dcks = append(pdd.dcks, dck)
- }
-
- return nil
-}
-
-var filesharedrivers = []string{config.NFSDriverType, config.HuaweiOceanStorFileDriverType, config.ManilaDriverType, config.ChubaofsDriverType}
-
-func (pdd *provisionDockDiscoverer) Discover() error {
- // Clear existing pool info
- pdd.pols = pdd.pols[:0]
- var pols []*model.StoragePoolSpec
- var err error
- var polsInDb []*model.StoragePoolSpec
- ctx := c.NewAdminContext()
- polsInDb, err = pdd.c.ListPools(ctx)
- if err != nil {
- return fmt.Errorf("can not read pools in db")
- }
- dbPolsMap := make(map[string]map[string]*model.StoragePoolSpec)
- for _, dck := range pdd.dcks {
- dbPolsMap[dck.Id] = make(map[string]*model.StoragePoolSpec)
- }
- for _, polInDb := range polsInDb {
- if dbPolsMap[polInDb.DockId] != nil {
- polInDb.Status = unavailableStatus
- dbPolsMap[polInDb.DockId][polInDb.Id] = polInDb
- }
- }
- for _, dck := range pdd.dcks {
- // Call function of StorageDrivers configured by storage drivers.
- if utils.Contains(filesharedrivers, dck.DriverName) {
- d := fd.Init(dck.DriverName)
- defer fd.Clean(d)
- pols, err = d.ListPools()
- for _, pol := range pols {
- log.Infof("Backend %s discovered pool %s", dck.DriverName, pol.Name)
- delete(dbPolsMap[dck.Id], pol.Id)
- pol.DockId = dck.Id
- pol.Status = availableStatus
- }
- } else {
- d := drivers.Init(dck.DriverName)
- defer drivers.Clean(d)
- pols, err = d.ListPools()
-
- replicationDriverName := dck.Metadata["HostReplicationDriver"]
- replicationType := model.ReplicationTypeHost
- if drivers.IsSupportArrayBasedReplication(dck.DriverName) {
- replicationType = model.ReplicationTypeArray
- replicationDriverName = dck.DriverName
- }
- for _, pol := range pols {
- log.Infof("Backend %s discovered pool %s", dck.DriverName, pol.Name)
- name := map[string][]string{
- "Name": {pol.Name},
- "DockId": {dck.Id},
- }
- pools, err := pdd.c.ListPoolsWithFilter(ctx, name)
- if err == nil && len(pools) != 0 {
- pol.Id = pools[0].Id
- }
-
- delete(dbPolsMap[dck.Id], pol.Id)
- pol.DockId = dck.Id
- pol.ReplicationType = replicationType
- pol.ReplicationDriverName = replicationDriverName
- pol.Status = availableStatus
- }
- }
- if err != nil {
- log.Error("Call driver to list pools failed:", err)
- continue
- }
-
- if len(pols) == 0 {
- log.Warningf("The pool of dock %s is empty!\n", dck.Id)
- }
-
- pdd.pols = append(pdd.pols, pols...)
- for _, pol := range dbPolsMap[dck.Id] {
- pdd.pols = append(pdd.pols, pol)
- }
-
- }
- if len(pdd.pols) == 0 {
- return fmt.Errorf("there is no pool can be found")
- }
-
- return nil
-}
-
-func (pdd *provisionDockDiscoverer) Report() error {
- var err error
-
- // Store dock resources in database.
- for _, dck := range pdd.dcks {
- if err = pdd.Register(dck); err != nil {
- break
- }
- }
-
- // Store pool resources in database.
- for _, pol := range pdd.pols {
- if err != nil {
- break
- }
- err = pdd.Register(pol)
- }
-
- return err
-}
-
-// attachDockDiscoverer is a struct for exposing some operations of attach
-// dock service discovery.
-type attachDockDiscoverer struct {
- *DockRegister
-
- dck *model.DockSpec
-}
-
-func (add *attachDockDiscoverer) Init() error { return nil }
-
-func (add *attachDockDiscoverer) Discover() error {
- host, err := os.Hostname()
- if err != nil {
- log.Error("When get os hostname:", err)
- return err
- }
-
- localIqn, err := connector.NewConnector(connector.IscsiDriver).GetInitiatorInfo()
- if err != nil {
- log.Warning("get initiator failed, ", err)
- }
-
- bindIp := CONF.BindIp
- if bindIp == "" {
- bindIp = connector.GetHostIP()
- }
-
- fcInitiator, err := connector.NewConnector(connector.FcDriver).GetInitiatorInfo()
- if err != nil {
- log.Warning("get initiator failed, ", err)
- }
-
- var wwpns []string
- for _, v := range fcInitiator {
- if strings.Contains(v, "node_name") {
- wwpns = append(wwpns, strings.Split(v, ":")[1])
- }
- }
-
- segments := strings.Split(CONF.OsdsDock.ApiEndpoint, ":")
- endpointIp := segments[len(segments)-2]
- add.dck = &model.DockSpec{
- BaseModel: &model.BaseModel{
- Id: uuid.NewV5(uuid.NamespaceOID, host+":"+endpointIp).String(),
- },
- Endpoint: CONF.OsdsDock.ApiEndpoint,
- NodeId: host,
- Type: model.DockTypeAttacher,
- Metadata: map[string]string{
- "Platform": runtime.GOARCH,
- "OsType": runtime.GOOS,
- "HostIp": bindIp,
- "Initiator": localIqn[0],
- "WWPNS": strings.Join(wwpns, ","),
- },
- }
- return nil
-}
-
-func (add *attachDockDiscoverer) Report() error {
- return add.Register(add.dck)
-}
-
-func NewDockRegister() *DockRegister {
- return &DockRegister{c: db.C}
-}
-
-type DockRegister struct {
- c db.Client
-}
-
-func (dr *DockRegister) Register(in interface{}) error {
- ctx := c.NewAdminContext()
-
- switch in.(type) {
- case *model.DockSpec:
- dck := in.(*model.DockSpec)
- // Call db module to create dock resource.
- if _, err := dr.c.CreateDock(ctx, dck); err != nil {
- log.Errorf("When create dock %s in db: %v\n", dck.Id, err)
- return err
- }
- break
- case *model.StoragePoolSpec:
- pol := in.(*model.StoragePoolSpec)
- // Call db module to create pool resource.
- if _, err := dr.c.CreatePool(ctx, pol); err != nil {
- log.Errorf("When create pool %s in db: %v\n", pol.Id, err)
- return err
- }
- break
- default:
- return fmt.Errorf("Resource type is not supported!")
- }
-
- return nil
-}
-
-func (dr *DockRegister) Unregister(in interface{}) error {
- ctx := c.NewAdminContext()
-
- switch in.(type) {
- case *model.DockSpec:
- dck := in.(*model.DockSpec)
- // Call db module to delete dock resource.
- if err := dr.c.DeleteDock(ctx, dck.Id); err != nil {
- log.Errorf("When delete dock %s in db: %v\n", dck.Id, err)
- return err
- }
- break
- case *model.StoragePoolSpec:
- pol := in.(*model.StoragePoolSpec)
- // Call db module to delete pool resource.
- if err := dr.c.DeletePool(ctx, pol.Id); err != nil {
- log.Errorf("When delete pool %s in db: %v\n", pol.Id, err)
- return err
- }
- break
- default:
- return fmt.Errorf("Resource type is not supported!")
- }
-
- return nil
-}
diff --git a/pkg/dock/discovery/discovery_test.go b/pkg/dock/discovery/discovery_test.go
deleted file mode 100644
index 5a137f292..000000000
--- a/pkg/dock/discovery/discovery_test.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2017 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package discovery
-
-import (
- "reflect"
- "testing"
-
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/pkg/utils/config"
- . "github.com/opensds/opensds/testutils/collection"
- dbtest "github.com/opensds/opensds/testutils/db/testing"
-)
-
-const (
- expectedUuid = "0e9c3c68-8a0b-11e7-94a7-67f755e235cb"
- expectedCreatedAt = "2017-08-26T11:01:09"
- expectedUpdatedAt = "2017-08-26T11:01:55"
-)
-
-func init() {
- CONF.OsdsDock = OsdsDock{
- ApiEndpoint: "localhost:50050",
- EnabledBackends: []string{"sample"},
- Backends: Backends{
- Sample: BackendProperties{
- Name: "sample",
- Description: "sample backend service",
- DriverName: "sample",
- },
- },
- }
-}
-
-func NewFakeDockDiscoverer() *provisionDockDiscoverer {
- return &provisionDockDiscoverer{
- DockRegister: &DockRegister{},
- }
-}
-
-func TestInit(t *testing.T) {
- var fdd = NewFakeDockDiscoverer()
- var expected []*model.DockSpec
-
- for i := range SampleDocks {
- expected = append(expected, &SampleDocks[i])
- }
- name := map[string][]string{"Name": {SampleDocks[0].Name}}
- mockClient := new(dbtest.Client)
- mockClient.On("ListDocksWithFilter", c.NewAdminContext(), name).Return(expected, nil)
- fdd.c = mockClient
- if err := fdd.Init(); err != nil {
- t.Errorf("Failed to init discoverer struct: %v\n", err)
- }
- for i := range fdd.dcks {
- fdd.dcks[i].Id = ""
- fdd.dcks[i].NodeId = ""
- fdd.dcks[i].Metadata = nil
- expected[i].Id = ""
- }
- if !reflect.DeepEqual(fdd.dcks, expected) {
- t.Errorf("Expected %+v, got %+v\n", expected, fdd.dcks)
- }
-}
-
-func TestDiscover(t *testing.T) {
- var fdd = NewFakeDockDiscoverer()
- var expected []*model.StoragePoolSpec
-
- for i := range SampleDocks {
- fdd.dcks = append(fdd.dcks, &SampleDocks[i])
- }
- for i := range SamplePools {
- fdd.pols = append(fdd.pols, &SamplePools[i])
- expected = append(expected, &SamplePools[i])
- }
- m1 := map[string][]string{
- "Name": {SamplePools[0].Name},
- "DockId": {""},
- }
- m2 := map[string][]string{
- "Name": {SamplePools[1].Name},
- "DockId": {""},
- }
- m3 := map[string][]string{
- "Name": {SamplePools[2].Name},
- "DockId": {""},
- }
-
- mockClient := new(dbtest.Client)
- mockClient.On("ListPools", c.NewAdminContext()).Return(fdd.pols, nil)
- mockClient.On("ListPoolsWithFilter", c.NewAdminContext(), m1).Return(expected, nil)
- mockClient.On("ListPoolsWithFilter", c.NewAdminContext(), m2).Return(expected, nil)
- mockClient.On("ListPoolsWithFilter", c.NewAdminContext(), m3).Return(expected, nil)
- fdd.c = mockClient
-
- if err := fdd.Discover(); err != nil {
- t.Errorf("Failed to discoverer pools: %v\n", err)
- }
- for _, pol := range fdd.pols {
- pol.Id = ""
- }
- if !reflect.DeepEqual(fdd.pols, expected) {
- t.Errorf("Expected %+v, got %+v\n", expected, fdd.pols)
- }
-}
-
-func TestReport(t *testing.T) {
- var fdd = NewFakeDockDiscoverer()
-
- for i := range SampleDocks {
- fdd.dcks = append(fdd.dcks, &SampleDocks[i])
- }
- for i := range SamplePools {
- fdd.pols = append(fdd.pols, &SamplePools[i])
- }
-
- mockClient := new(dbtest.Client)
- mockClient.On("CreateDock", c.NewAdminContext(), fdd.dcks[0]).Return(nil, nil)
- mockClient.On("CreatePool", c.NewAdminContext(), fdd.pols[0]).Return(nil, nil)
- mockClient.On("CreatePool", c.NewAdminContext(), fdd.pols[1]).Return(nil, nil)
- mockClient.On("CreatePool", c.NewAdminContext(), fdd.pols[2]).Return(nil, nil)
- fdd.c = mockClient
-
- if err := fdd.Report(); err != nil {
- t.Errorf("Failed to store docks and pools into database: %v\n", err)
- }
-}
diff --git a/pkg/dock/dock.go b/pkg/dock/dock.go
deleted file mode 100755
index 5e0e59edc..000000000
--- a/pkg/dock/dock.go
+++ /dev/null
@@ -1,594 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements the entry into operations of storageDock module.
-*/
-
-package dock
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net"
-
- log "github.com/golang/glog"
- "github.com/opensds/opensds/contrib/connector"
- "github.com/opensds/opensds/contrib/drivers"
- "github.com/opensds/opensds/contrib/drivers/filesharedrivers"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/db"
- "github.com/opensds/opensds/pkg/dock/discovery"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- "google.golang.org/grpc"
-
- _ "github.com/opensds/opensds/contrib/connector/fc"
- _ "github.com/opensds/opensds/contrib/connector/iscsi"
- _ "github.com/opensds/opensds/contrib/connector/nfs"
- _ "github.com/opensds/opensds/contrib/connector/nvmeof"
- _ "github.com/opensds/opensds/contrib/connector/rbd"
-)
-
-// dockServer is used to implement pb.DockServer
-type dockServer struct {
- Port string
- // Discoverer represents the mechanism of DockHub discovering the storage
- // capabilities from different backends.
- Discoverer discovery.DockDiscoverer
- // Driver represents the specified backend resource. This field is used
- // for initializing the specified volume driver.
- Driver drivers.VolumeDriver
- // Metrics driver to collect metrics
- MetricDriver drivers.MetricDriver
-
- // FileShareDriver represents the specified backend resource. This field is used
- // for initializing the specified file share driver.
- FileShareDriver filesharedrivers.FileShareDriver
-}
-
-// NewDockServer returns a dockServer instance.
-func NewDockServer(dockType, port string) *dockServer {
- return &dockServer{
- Port: port,
- Discoverer: discovery.NewDockDiscoverer(dockType),
- }
-}
-
-// Run method would automatically discover dock and pool resources from
-// backends, and then start the listen mechanism of dock module.
-func (ds *dockServer) Run() error {
- // New Grpc Server
- s := grpc.NewServer()
- // Register dock service.
- pb.RegisterProvisionDockServer(s, ds)
- pb.RegisterAttachDockServer(s, ds)
- pb.RegisterFileShareDockServer(s, ds)
-
- // Trigger the discovery and report loop so that the dock service would
- // update the capabilities from backends automatically.
- if err := func() error {
- var err error
- if err = ds.Discoverer.Init(); err != nil {
- return err
- }
- ctx := &discovery.Context{
- StopChan: make(chan bool),
- ErrChan: make(chan error),
- MetaChan: make(chan string),
- }
- go discovery.DiscoveryAndReport(ds.Discoverer, ctx)
- go func(ctx *discovery.Context) {
- if err = <-ctx.ErrChan; err != nil {
- log.Error("when calling capabilty report method:", err)
- ctx.StopChan <- true
- }
- }(ctx)
- return err
- }(); err != nil {
- return err
- }
-
- // Listen the dock server port.
- lis, err := net.Listen("tcp", ds.Port)
- if err != nil {
- log.Fatalf("failed to listen: %+v", err)
- return err
- }
-
- log.Info("Dock server initialized! Start listening on port:", lis.Addr())
-
- // Start dock server watching loop.
- defer s.Stop()
- return s.Serve(lis)
-}
-
-// CreateVolume implements pb.DockServer.CreateVolume
-func (ds *dockServer) CreateVolume(ctx context.Context, opt *pb.CreateVolumeOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.Driver = drivers.Init(opt.GetDriverName())
- defer drivers.Clean(ds.Driver)
-
- log.Info("Dock server receive create volume request, vr =", opt)
-
- vol, err := ds.Driver.CreateVolume(opt)
- if err != nil {
- log.Error("when create volume in dock module:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(vol), nil
-}
-
-// DeleteVolume implements pb.DockServer.DeleteVolume
-func (ds *dockServer) DeleteVolume(ctx context.Context, opt *pb.DeleteVolumeOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.Driver = drivers.Init(opt.GetDriverName())
- defer drivers.Clean(ds.Driver)
-
- log.Info("Dock server receive delete volume request, vr =", opt)
-
- if err := ds.Driver.DeleteVolume(opt); err != nil {
- log.Error("error occurred in dock module when delete volume:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(nil), nil
-}
-
-// ExtendVolume implements pb.DockServer.ExtendVolume
-func (ds *dockServer) ExtendVolume(ctx context.Context, opt *pb.ExtendVolumeOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.Driver = drivers.Init(opt.GetDriverName())
- defer drivers.Clean(ds.Driver)
-
- log.Info("Dock server receive extend volume request, vr =", opt)
-
- vol, err := ds.Driver.ExtendVolume(opt)
- if err != nil {
- log.Error("when extend volume in dock module:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(vol), nil
-}
-
-// CreateVolumeAttachment implements pb.DockServer.CreateVolumeAttachment
-func (ds *dockServer) CreateVolumeAttachment(ctx context.Context, opt *pb.CreateVolumeAttachmentOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.Driver = drivers.Init(opt.GetDriverName())
- defer drivers.Clean(ds.Driver)
-
- log.Info("Dock server receive create volume attachment request, vr =", opt)
-
- connInfo, err := ds.Driver.InitializeConnection(opt)
- if err != nil {
- log.Error("error occurred in dock module when initialize volume connection:", err)
- return pb.GenericResponseError(err), err
- }
-
- var atc = &model.VolumeAttachmentSpec{
- BaseModel: &model.BaseModel{
- Id: opt.GetId(),
- },
- ConnectionInfo: *connInfo,
- }
- log.V(8).Infof("CreateVolumeAttachment result: %v", atc)
- return pb.GenericResponseResult(atc), nil
-}
-
-// DeleteVolumeAttachment implements pb.DockServer.DeleteVolumeAttachment
-func (ds *dockServer) DeleteVolumeAttachment(ctx context.Context, opt *pb.DeleteVolumeAttachmentOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.Driver = drivers.Init(opt.GetDriverName())
- defer drivers.Clean(ds.Driver)
-
- log.Info("Dock server receive delete volume attachment request, vr =", opt)
-
- if err := ds.Driver.TerminateConnection(opt); err != nil {
- log.Error("error occurred in dock module when terminate volume connection:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(nil), nil
-}
-
-// CreateVolumeSnapshot implements pb.DockServer.CreateVolumeSnapshot
-func (ds *dockServer) CreateVolumeSnapshot(ctx context.Context, opt *pb.CreateVolumeSnapshotOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.Driver = drivers.Init(opt.GetDriverName())
- defer drivers.Clean(ds.Driver)
-
- log.Info("Dock server receive create volume snapshot request, vr =", opt)
-
- snp, err := ds.Driver.CreateSnapshot(opt)
- if err != nil {
- log.Error("error occurred in dock module when create snapshot:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(snp), nil
-}
-
-// DeleteVolumeSnapshot implements pb.DockServer.DeleteVolumeSnapshot
-func (ds *dockServer) DeleteVolumeSnapshot(ctx context.Context, opt *pb.DeleteVolumeSnapshotOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.Driver = drivers.Init(opt.GetDriverName())
- defer drivers.Clean(ds.Driver)
-
- log.Info("Dock server receive delete volume snapshot request, vr =", opt)
-
- if err := ds.Driver.DeleteSnapshot(opt); err != nil {
- log.Error("error occurred in dock module when delete snapshot:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(nil), nil
-}
-
-// AttachVolume implements pb.DockServer.AttachVolume
-func (ds *dockServer) AttachVolume(ctx context.Context, opt *pb.AttachVolumeOpts) (*pb.GenericResponse, error) {
- var connData = make(map[string]interface{})
- if err := json.Unmarshal([]byte(opt.GetConnectionData()), &connData); err != nil {
- log.Error("error occurred in dock module when unmarshalling connection data!")
- return pb.GenericResponseError(err), err
- }
-
- log.Info("Dock server receive attach volume request, vr =", opt)
-
- con := connector.NewConnector(opt.GetAccessProtocol())
- if con == nil {
- err := fmt.Errorf("can not find connector (%s)!", opt.GetAccessProtocol())
- return pb.GenericResponseError(err), err
- }
- atc, err := con.Attach(connData)
- if err != nil {
- log.Error("error occurred in dock module when attach volume:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(atc), nil
-}
-
-// DetachVolume implements pb.DockServer.DetachVolume
-func (ds *dockServer) DetachVolume(ctx context.Context, opt *pb.DetachVolumeOpts) (*pb.GenericResponse, error) {
- var connData = make(map[string]interface{})
- if err := json.Unmarshal([]byte(opt.GetConnectionData()), &connData); err != nil {
- log.Error("error occurred in dock module when unmarshalling connection data!")
- return pb.GenericResponseError(err), err
- }
-
- log.Info("Dock server receive detach volume request, vr =", opt)
-
- con := connector.NewConnector(opt.GetAccessProtocol())
- if con == nil {
- err := fmt.Errorf("can not find connector (%s)!", opt.GetAccessProtocol())
- return pb.GenericResponseError(err), err
- }
- if err := con.Detach(connData); err != nil {
- log.Error("error occurred in dock module when detach volume:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(nil), nil
-}
-
-// CreateReplication implements opensds.DockServer
-func (ds *dockServer) CreateReplication(ctx context.Context, opt *pb.CreateReplicationOpts) (*pb.GenericResponse, error) {
- //Get the storage replication drivers and do some initializations.
- driver, _ := drivers.InitReplicationDriver(opt.GetDriverName())
- defer drivers.CleanReplicationDriver(driver)
-
- log.Info("Dock server receive create replication request, vr =", opt)
- replica, err := driver.CreateReplication(opt)
- if err != nil {
- log.Error("error occurred in dock module when create replication:", err)
- return pb.GenericResponseError(err), err
- }
-
- replica.PoolId = opt.GetPoolId()
- replica.ProfileId = opt.GetProfileId()
- replica.Name = opt.GetName()
-
- return pb.GenericResponseResult(replica), nil
-}
-
-func (ds *dockServer) DeleteReplication(ctx context.Context, opt *pb.DeleteReplicationOpts) (*pb.GenericResponse, error) {
- // Get the storage replication drivers and do some initializations.
- driver, _ := drivers.InitReplicationDriver(opt.GetDriverName())
- defer drivers.CleanReplicationDriver(driver)
-
- log.Info("Dock server receive delete replication request, vr =", opt)
-
- if err := driver.DeleteReplication(opt); err != nil {
- log.Error("error occurred in dock module when delete replication:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(nil), nil
-}
-
-func (ds *dockServer) EnableReplication(ctx context.Context, opt *pb.EnableReplicationOpts) (*pb.GenericResponse, error) {
- // Get the storage replication drivers and do some initializations.
- driver, _ := drivers.InitReplicationDriver(opt.GetDriverName())
- defer drivers.CleanReplicationDriver(driver)
-
- log.Info("Dock server receive enable replication request, vr =", opt)
-
- if err := driver.EnableReplication(opt); err != nil {
- log.Error("error occurred in dock module when enable replication:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(nil), nil
-}
-
-func (ds *dockServer) DisableReplication(ctx context.Context, opt *pb.DisableReplicationOpts) (*pb.GenericResponse, error) {
- // Get the storage replication drivers and do some initializations.
- driver, _ := drivers.InitReplicationDriver(opt.GetDriverName())
- defer drivers.CleanReplicationDriver(driver)
-
- log.Info("Dock server receive disable replication request, vr =", opt)
-
- if err := driver.DisableReplication(opt); err != nil {
- log.Error("error occurred in dock module when disable replication:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(nil), nil
-}
-
-func (ds *dockServer) FailoverReplication(ctx context.Context, opt *pb.FailoverReplicationOpts) (*pb.GenericResponse, error) {
- // Get the storage replication drivers and do some initializations.
- driver, _ := drivers.InitReplicationDriver(opt.GetDriverName())
- defer drivers.CleanReplicationDriver(driver)
-
- log.Info("Dock server receive failover replication request, vr =", opt)
-
- if err := driver.FailoverReplication(opt); err != nil {
- log.Error("error occurred in dock module when failover replication:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(nil), nil
-}
-
-// CreateVolumeGroup implements pb.DockServer.CreateVolumeGroup
-func (ds *dockServer) CreateVolumeGroup(ctx context.Context, opt *pb.CreateVolumeGroupOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.Driver = drivers.Init(opt.GetDriverName())
- defer drivers.Clean(ds.Driver)
-
- log.Info("Dock server receive create volume group request, vr =", opt)
-
- vg, err := ds.Driver.CreateVolumeGroup(opt)
- if err != nil {
- if _, ok := err.(*model.NotImplementError); !ok {
- log.Error("when calling volume driver to create volume group:", err)
- return pb.GenericResponseError(err), err
- }
- }
-
- log.Infof("Create volume group (%s) successfully.\n", opt.GetId())
- // TODO Currently no storage driver has implemented with volume group operations,
- // So we will fetch the volume group resource from database as the fake one.
- vg, err = db.C.GetVolumeGroup(c.NewContextFromJson(opt.GetContext()), opt.GetId())
- if err != nil {
- return pb.GenericResponseError(err), err
- }
- return pb.GenericResponseResult(vg), nil
-}
-
-func (ds *dockServer) UpdateVolumeGroup(ctx context.Context, opt *pb.UpdateVolumeGroupOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.Driver = drivers.Init(opt.GetDriverName())
- defer drivers.Clean(ds.Driver)
-
- log.Info("Dock server receive update volume group request, vr =", opt)
-
- vg, err := ds.Driver.UpdateVolumeGroup(opt)
- if err != nil {
- if _, ok := err.(*model.NotImplementError); !ok {
- err = errors.New("error occurred when updating group" + opt.GetId() + "," + err.Error())
- return pb.GenericResponseError(err), err
- }
- }
-
- log.Infof("Update volume group (%s) successfully.\n", opt.GetId())
- // TODO Currently no storage driver has implemented with volume group operations,
- // So we will fetch the volume group resource from database as the fake one.
- vg, err = db.C.GetVolumeGroup(c.NewContextFromJson(opt.GetContext()), opt.GetId())
- if err != nil {
- return pb.GenericResponseError(err), err
- }
- return pb.GenericResponseResult(vg), nil
-}
-
-func (ds *dockServer) DeleteVolumeGroup(ctx context.Context, opt *pb.DeleteVolumeGroupOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.Driver = drivers.Init(opt.GetDriverName())
- defer drivers.Clean(ds.Driver)
-
- log.Info("Dock server receive delete volume group request, vr =", opt)
-
- if err := ds.Driver.DeleteVolumeGroup(opt); err != nil {
- if _, ok := err.(*model.NotImplementError); !ok {
- return pb.GenericResponseError(err), err
- }
- if err = ds.deleteGroupGeneric(opt); err != nil {
- return pb.GenericResponseError(err), err
- }
- }
-
- log.Infof("Delete volume group (%s) successfully.\n", opt.GetId())
- return pb.GenericResponseResult(nil), nil
-}
-
-func (ds *dockServer) deleteGroupGeneric(opt *pb.DeleteVolumeGroupOpts) error {
- ctx := c.NewContextFromJson(opt.GetContext())
-
- volumes, err := db.C.ListVolumesByGroupId(ctx, opt.GetId())
- if err != nil {
- return err
- }
- for _, volRef := range volumes {
- if err = ds.Driver.DeleteVolume(&pb.DeleteVolumeOpts{
- Id: volRef.Id,
- Metadata: volRef.Metadata,
- }); err != nil {
- log.Error(fmt.Sprintf("error occurred when delete volume %s from group.", volRef.Id))
- db.UpdateVolumeStatus(ctx, db.C, volRef.Id, model.VolumeError)
- db.UpdateVolumeGroupStatus(ctx, db.C, opt.GetId(), model.VolumeGroupError)
- } else {
- // Delete the volume entry in DB after successfully deleting the
- // volume on the storage.
- db.C.DeleteVolume(ctx, volRef.Id)
- }
- }
-
- return nil
-}
-
-// Collect the specified metrics from the metric driver
-func (ds *dockServer) CollectMetrics(ctx context.Context, opt *pb.CollectMetricsOpts) (*pb.GenericResponse, error) {
- log.V(5).Info("in dock CollectMetrics methods")
- ds.MetricDriver = drivers.InitMetricDriver(opt.GetDriverName())
-
- defer drivers.CleanMetricDriver(ds.MetricDriver)
-
- log.Infof("dock server receive CollectMetrics request, vr =%s", opt)
-
- result, err := ds.MetricDriver.CollectMetrics()
- if err != nil {
- log.Errorf("error occurred in dock module for collect metrics: %s", err.Error())
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(result), nil
-}
-
-// CreateFileShareAcl implements pb.DockServer.CreateFileShare
-func (ds *dockServer) CreateFileShareAcl(ctx context.Context, opt *pb.CreateFileShareAclOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.FileShareDriver = filesharedrivers.Init(opt.GetDriverName())
- defer filesharedrivers.Clean(ds.FileShareDriver)
-
- log.Info("dock server receive create file share acl request, vr =", opt)
-
- fileshare, err := ds.FileShareDriver.CreateFileShareAcl(opt)
- if err != nil {
- log.Error("when create file share acl in dock module:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(fileshare), nil
-}
-
-// DeleteFileShareAcl implements pb.DockServer.DeleteFileShare
-func (ds *dockServer) DeleteFileShareAcl(ctx context.Context, opt *pb.DeleteFileShareAclOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.FileShareDriver = filesharedrivers.Init(opt.GetDriverName())
- defer filesharedrivers.Clean(ds.FileShareDriver)
-
- log.Info("dock server receive delete file share acl request, vr =", opt)
-
- if err := ds.FileShareDriver.DeleteFileShareAcl(opt); err != nil {
- log.Error("when delete file share acl in dock module:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(nil), nil
-}
-
-// CreateFileShare implements pb.DockServer.CreateFileShare
-func (ds *dockServer) CreateFileShare(ctx context.Context, opt *pb.CreateFileShareOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.FileShareDriver = filesharedrivers.Init(opt.GetDriverName())
- defer filesharedrivers.Clean(ds.FileShareDriver)
-
- log.Info("Dock server receive create file share request, vr =", opt)
-
- log.V(5).Infof("Dock server create fleshare: sent to Driver %+v", opt.GetDriverName())
-
- fileshare, err := ds.FileShareDriver.CreateFileShare(opt)
- if err != nil {
- log.Error("when create file share in dock module:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(fileshare), nil
-}
-
-// DeleteFileShare implements pb.DockServer.DeleteFileShare
-func (ds *dockServer) DeleteFileShare(ctx context.Context, opt *pb.DeleteFileShareOpts) (*pb.GenericResponse, error) {
-
- // Get the storage drivers and do some initializations.
- ds.FileShareDriver = filesharedrivers.Init(opt.GetDriverName())
- defer filesharedrivers.Clean(ds.FileShareDriver)
-
- log.Info("Dock server receive delete file share request, vr =", opt)
-
- if err := ds.FileShareDriver.DeleteFileShare(opt); err != nil {
- log.Error("error occurred in dock module when delete file share:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(nil), nil
-}
-
-// CreateFileShareSnapshot implements pb.DockServer.CreateFileShareSnapshot
-func (ds *dockServer) CreateFileShareSnapshot(ctx context.Context, opt *pb.CreateFileShareSnapshotOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.FileShareDriver = filesharedrivers.Init(opt.GetDriverName())
- defer filesharedrivers.Clean(ds.FileShareDriver)
-
- log.Info("Dock server receive create file share snapshot request, vr =", opt)
-
- snp, err := ds.FileShareDriver.CreateFileShareSnapshot(opt)
- if err != nil {
- log.Error("error occurred in dock module when create snapshot:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(snp), nil
-}
-
-func (ds *dockServer) DeleteFileShareSnapshot(ctx context.Context, opt *pb.DeleteFileShareSnapshotOpts) (*pb.GenericResponse, error) {
- // Get the storage drivers and do some initializations.
- ds.FileShareDriver = filesharedrivers.Init(opt.GetDriverName())
- defer filesharedrivers.Clean(ds.FileShareDriver)
-
- log.Info("Dock server receive delete file share snapshot request, vr =", opt)
-
- if err := ds.FileShareDriver.DeleteFileShareSnapshot(opt); err != nil {
- log.Error("error occurred in dock module when delete snapshot:", err)
- return pb.GenericResponseError(err), err
- }
-
- return pb.GenericResponseResult(nil), nil
-}
-
-// GetMetrics method is only defined to make ProvisioinDock service consistent with
-// ProvisionController service, so this method is not allowed to be called.
-func (ds *dockServer) GetMetrics(context.Context, *pb.GetMetricsOpts) (*pb.GenericResponse, error) {
- return nil, &model.NotImplementError{"method GetMetrics has not been implemented yet"}
-}
-
-// GetUrls method is only defined to make ProvisioinDock service consistent with
-// ProvisionController service, so this method is not allowed to be called.
-func (ds *dockServer) GetUrls(context.Context, *pb.NoParams) (*pb.GenericResponse, error) {
- return nil, &model.NotImplementError{"method GetUrls has not been implemented yet"}
-}
diff --git a/pkg/dock/dock_test.go b/pkg/dock/dock_test.go
deleted file mode 100644
index 0050fb95d..000000000
--- a/pkg/dock/dock_test.go
+++ /dev/null
@@ -1,390 +0,0 @@
-// Copyright 2019 The OpenSDS Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-This module implements the entry into operations of storageDock module.
-*/
-
-package dock
-
-import (
- "context"
- "reflect"
- "testing"
-
- "github.com/opensds/opensds/contrib/drivers"
- "github.com/opensds/opensds/contrib/drivers/filesharedrivers"
- "github.com/opensds/opensds/pkg/dock/discovery"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- data "github.com/opensds/opensds/testutils/collection"
-)
-
-func NewFakeDockServer() *dockServer {
- return &dockServer{
- Port: "50050",
- Discoverer: discovery.NewDockDiscoverer(model.DockTypeProvioner),
- }
-}
-
-func NewFakeAttachDockServer() *dockServer {
- return &dockServer{
- Port: "50050",
- Discoverer: discovery.NewDockDiscoverer(model.DockTypeAttacher),
- }
-}
-
-func TestNewDockServer(t *testing.T) {
- type args struct {
- dockType string
- port string
- }
- tests := []struct {
- name string
- args args
- want *dockServer
- }{
- {
- name: "Provisioner docktype test",
- args: args{model.DockTypeProvioner, "50050"},
- want: NewFakeDockServer(),
- },
- {
- name: "Attacher docktype test",
- args: args{model.DockTypeAttacher, "50050"},
- want: NewFakeAttachDockServer(),
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got := NewDockServer(tt.args.dockType, tt.args.port)
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("NewDockServer() = %v, want %v", got, tt.want)
- }
-
- })
- }
-}
-
-func Test_dockServer_CreateFileShareAcl(t *testing.T) {
- type fields struct {
- Port string
- Discoverer discovery.DockDiscoverer
- Driver drivers.VolumeDriver
- MetricDriver drivers.MetricDriver
- FileShareDriver filesharedrivers.FileShareDriver
- }
- type args struct {
- ctx context.Context
- opt *pb.CreateFileShareAclOpts
- }
- var req = &pb.CreateFileShareAclOpts{
- Id: "d2975ebe-d82c-430f-b28e-f373746a71ca",
- Description: "This is a sample Acl for testing",
- Type: "ip",
- AccessTo: "10.21.23.10",
- AccessCapability: []string{"Read", "Write"},
- }
- want1 := &pb.GenericResponse{
- Reply: &pb.GenericResponse_Result_{
- Result: &pb.GenericResponse_Result{
- Message: data.ByteFileShareAcl,
- },
- },
- }
- tests := []struct {
- name string
- fields fields
- args args
- want *pb.GenericResponse
- wantErr bool
- }{
- {name: "Create file share acl dock test", args: args{
- ctx: context.Background(),
- opt: req,
- }, want: want1, wantErr: false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- ds := &dockServer{
- Port: tt.fields.Port,
- Discoverer: tt.fields.Discoverer,
- Driver: tt.fields.Driver,
- MetricDriver: tt.fields.MetricDriver,
- FileShareDriver: tt.fields.FileShareDriver,
- }
- _, err := ds.CreateFileShareAcl(tt.args.ctx, tt.args.opt)
- if (err != nil) != tt.wantErr {
- t.Errorf("dockServer.CreateFileShareAcl() error = %v", err)
- }
- })
- }
-}
-
-func Test_dockServer_DeleteFileShareAcl(t *testing.T) {
- type fields struct {
- Port string
- Discoverer discovery.DockDiscoverer
- Driver drivers.VolumeDriver
- MetricDriver drivers.MetricDriver
- FileShareDriver filesharedrivers.FileShareDriver
- }
- type args struct {
- ctx context.Context
- opt *pb.DeleteFileShareAclOpts
- }
- var req = &pb.DeleteFileShareAclOpts{
- Id: "d2975ebe-d82c-430f-b28e-f373746a71ca",
- Description: "This is a sample Acl for testing",
- }
- want1 := &pb.GenericResponse{
- Reply: nil,
- }
- tests := []struct {
- name string
- fields fields
- args args
- want *pb.GenericResponse
- wantErr bool
- }{
- {name: "Delete file share acl dock test", args: args{
- ctx: context.Background(),
- opt: req,
- }, want: want1, wantErr: false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- ds := &dockServer{
- Port: tt.fields.Port,
- Discoverer: tt.fields.Discoverer,
- Driver: tt.fields.Driver,
- MetricDriver: tt.fields.MetricDriver,
- FileShareDriver: tt.fields.FileShareDriver,
- }
- _, err := ds.DeleteFileShareAcl(tt.args.ctx, tt.args.opt)
- if (err != nil) != tt.wantErr {
- t.Errorf("dockServer.DeleteFileShareAcl() error = %v", err)
- }
- })
- }
-}
-
-func Test_dockServer_CreateFileShare(t *testing.T) {
- type fields struct {
- Port string
- Discoverer discovery.DockDiscoverer
- Driver drivers.VolumeDriver
- MetricDriver drivers.MetricDriver
- FileShareDriver filesharedrivers.FileShareDriver
- }
- type args struct {
- ctx context.Context
- opt *pb.CreateFileShareOpts
- }
- prf := &data.SampleFileShareProfiles[0]
- var req = &pb.CreateFileShareOpts{
- Id: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- Name: "sample-fileshare",
- Description: "This is a sample fileshare for testing",
- Size: 1,
- PoolId: "084bf71e-a102-11e7-88a8-e31fe6d52248",
- Profile: prf.ToJson(),
- }
- want1 := &pb.GenericResponse{
- Reply: &pb.GenericResponse_Result_{
- Result: &pb.GenericResponse_Result{
- Message: data.ByteFileShare,
- },
- },
- }
- tests := []struct {
- name string
- fields fields
- args args
- want *pb.GenericResponse
- wantErr bool
- }{
- {name: "Create file share dock test", args: args{
- ctx: context.Background(),
- opt: req,
- }, want: want1, wantErr: false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- ds := &dockServer{
- Port: tt.fields.Port,
- Discoverer: tt.fields.Discoverer,
- Driver: tt.fields.Driver,
- MetricDriver: tt.fields.MetricDriver,
- FileShareDriver: tt.fields.FileShareDriver,
- }
- _, err := ds.CreateFileShare(tt.args.ctx, tt.args.opt)
- if (err != nil) != tt.wantErr {
- t.Errorf("dockServer.CreateFileShare() failed error = %v", err)
- }
- })
- }
-}
-
-func Test_dockServer_DeleteFileShare(t *testing.T) {
- type fields struct {
- Port string
- Discoverer discovery.DockDiscoverer
- Driver drivers.VolumeDriver
- MetricDriver drivers.MetricDriver
- FileShareDriver filesharedrivers.FileShareDriver
- }
- type args struct {
- ctx context.Context
- opt *pb.DeleteFileShareOpts
- }
- var req = &pb.DeleteFileShareOpts{
- Id: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- Name: "sample-fileshare",
- }
- want1 := &pb.GenericResponse{
- Reply: nil,
- }
- tests := []struct {
- name string
- fields fields
- args args
- want *pb.GenericResponse
- wantErr bool
- }{
- {name: "Delete file share dock test", args: args{
- ctx: context.Background(),
- opt: req,
- }, want: want1, wantErr: false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- ds := &dockServer{
- Port: tt.fields.Port,
- Discoverer: tt.fields.Discoverer,
- Driver: tt.fields.Driver,
- MetricDriver: tt.fields.MetricDriver,
- FileShareDriver: tt.fields.FileShareDriver,
- }
- _, err := ds.DeleteFileShare(tt.args.ctx, tt.args.opt)
- if err != nil {
- t.Errorf("dockServer.DeleteFileShare() error = %v", err)
- }
- })
- }
-}
-
-func Test_dockServer_CreateFileShareSnapshot(t *testing.T) {
- type fields struct {
- Port string
- Discoverer discovery.DockDiscoverer
- Driver drivers.VolumeDriver
- MetricDriver drivers.MetricDriver
- FileShareDriver filesharedrivers.FileShareDriver
- }
- type args struct {
- ctx context.Context
- opt *pb.CreateFileShareSnapshotOpts
- }
- var req = &pb.CreateFileShareSnapshotOpts{
- Id: "3769855c-a102-11e7-b772-17b880d2f537",
- FileshareId: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- Name: "sample-snapshot-01",
- Description: "This is the first sample snapshot for testing",
- Size: int64(1),
- }
- want1 := &pb.GenericResponse{
- Reply: &pb.GenericResponse_Result_{
- Result: &pb.GenericResponse_Result{
- Message: data.ByteFileShareSnapshot,
- },
- },
- }
- tests := []struct {
- name string
- fields fields
- args args
- want *pb.GenericResponse
- wantErr bool
- }{
- {name: "Create file share snapshot dock test", args: args{
- ctx: context.Background(),
- opt: req,
- }, want: want1, wantErr: false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- ds := &dockServer{
- Port: tt.fields.Port,
- Discoverer: tt.fields.Discoverer,
- Driver: tt.fields.Driver,
- MetricDriver: tt.fields.MetricDriver,
- FileShareDriver: tt.fields.FileShareDriver,
- }
- _, err := ds.CreateFileShareSnapshot(tt.args.ctx, tt.args.opt)
- if err != nil {
- t.Errorf("dockServer.CreateFileShareSnapshot() failed error = %v", err)
- }
-
- })
- }
-}
-
-func Test_dockServer_DeleteFileShareSnapshot(t *testing.T) {
- type fields struct {
- Port string
- Discoverer discovery.DockDiscoverer
- Driver drivers.VolumeDriver
- MetricDriver drivers.MetricDriver
- FileShareDriver filesharedrivers.FileShareDriver
- }
- type args struct {
- ctx context.Context
- opt *pb.DeleteFileShareSnapshotOpts
- }
- req := &pb.DeleteFileShareSnapshotOpts{
- Id: "3769855c-a102-11e7-b772-17b880d2f537",
- FileshareId: "bd5b12a8-a101-11e7-941e-d77981b584d8",
- }
- want1 := &pb.GenericResponse{
- Reply: nil,
- }
- tests := []struct {
- name string
- fields fields
- args args
- want *pb.GenericResponse
- wantErr bool
- }{
- {name: "Delete file share snapshot dock test", args: args{
- ctx: context.Background(),
- opt: req,
- }, want: want1, wantErr: false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- ds := &dockServer{
- Port: tt.fields.Port,
- Discoverer: tt.fields.Discoverer,
- Driver: tt.fields.Driver,
- MetricDriver: tt.fields.MetricDriver,
- FileShareDriver: tt.fields.FileShareDriver,
- }
- _, err := ds.DeleteFileShareSnapshot(tt.args.ctx, tt.args.opt)
- if err != nil {
- t.Errorf("dockServer.DeleteFileShareSnapshot() error = %v", err)
- }
- })
- }
-}
diff --git a/contrib/drivers/utils/config/config.go b/pkg/driverconfig/config.go
similarity index 97%
rename from contrib/drivers/utils/config/config.go
rename to pkg/driverconfig/config.go
index df39eaa34..e560a16ed 100644
--- a/contrib/drivers/utils/config/config.go
+++ b/pkg/driverconfig/config.go
@@ -23,7 +23,7 @@ import (
"io/ioutil"
log "github.com/golang/glog"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
"gopkg.in/yaml.v2"
)
diff --git a/contrib/drivers/utils/config/config_test.go b/pkg/driverconfig/config_test.go
similarity index 97%
rename from contrib/drivers/utils/config/config_test.go
rename to pkg/driverconfig/config_test.go
index d4dc1a220..9c03e5313 100644
--- a/contrib/drivers/utils/config/config_test.go
+++ b/pkg/driverconfig/config_test.go
@@ -18,7 +18,7 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
type Config struct {
diff --git a/contrib/drivers/utils/config/constants.go b/pkg/driverconfig/constants.go
similarity index 100%
rename from contrib/drivers/utils/config/constants.go
rename to pkg/driverconfig/constants.go
diff --git a/contrib/drivers/utils/config/testdata/config.yaml b/pkg/driverconfig/testdata/config.yaml
similarity index 100%
rename from contrib/drivers/utils/config/testdata/config.yaml
rename to pkg/driverconfig/testdata/config.yaml
diff --git a/pkg/model/replication.go b/pkg/model/replication.go
index ffc27bf0f..82d392f5c 100755
--- a/pkg/model/replication.go
+++ b/pkg/model/replication.go
@@ -18,7 +18,7 @@ This module implements the common data structure.
package model
-import "github.com/opensds/opensds/pkg/model/proto"
+import "github.com/sodafoundation/controller/pkg/model/proto"
const (
ReplicationModeSync = "sync"
diff --git a/pkg/utils/config/config.go b/pkg/utils/config/config.go
index 5c836b34c..2d0a6515b 100755
--- a/pkg/utils/config/config.go
+++ b/pkg/utils/config/config.go
@@ -26,7 +26,7 @@ import (
"time"
"github.com/go-ini/ini"
- "github.com/opensds/opensds/pkg/utils/constants"
+ "github.com/sodafoundation/controller/pkg/utils/constants"
)
const (
diff --git a/pkg/utils/config/config_define.go b/pkg/utils/config/config_define.go
index 8eb9dd1db..bdb7a4a43 100755
--- a/pkg/utils/config/config_define.go
+++ b/pkg/utils/config/config_define.go
@@ -26,8 +26,8 @@ type OsdsApiServer struct {
PolicyPath string `conf:"policy_path,/etc/opensds/policy.json"`
LogFlushFrequency time.Duration `conf:"log_flush_frequency,5s"` // Default value is 5s
HTTPSEnabled bool `conf:"https_enabled,false"`
- BeegoHTTPSCertFile string `conf:"beego_https_cert_file,/opt/opensds-security/opensds/opensds-cert.pem"`
- BeegoHTTPSKeyFile string `conf:"beego_https_key_file,/opt/opensds-security/opensds/opensds-key.pem"`
+ BeegoHTTPSCertFile string `conf:"beego_https_cert_file,/opt/opensds-security/sodafoundation/controller-cert.pem"`
+ BeegoHTTPSKeyFile string `conf:"beego_https_key_file,/opt/opensds-security/sodafoundation/controller-key.pem"`
BeegoServerTimeOut int64 `conf:"beego_server_time_out,120"`
// prometheus related
diff --git a/pkg/utils/config/testdata/opensds.conf b/pkg/utils/config/testdata/opensds.conf
index b9cd698b0..dbf887a7e 100755
--- a/pkg/utils/config/testdata/opensds.conf
+++ b/pkg/utils/config/testdata/opensds.conf
@@ -3,8 +3,8 @@ api_endpoint = localhost:50040
log_flush_frequency = 2s
auth_strategy = keystone
# If https is enabled, the default value of cert file
-# is /opt/opensds-security/opensds/opensds-cert.pem,
-# and key file is /opt/opensds-security/opensds/opensds-key.pem
+# is /opt/opensds-security/sodafoundation/controller-cert.pem,
+# and key file is /opt/opensds-security/sodafoundation/controller-key.pem
https_enabled = False
beego_https_cert_file =
beego_https_key_file =
diff --git a/pkg/utils/logs/logs.go b/pkg/utils/logs/logs.go
index 572e8c79a..e1f32c159 100755
--- a/pkg/utils/logs/logs.go
+++ b/pkg/utils/logs/logs.go
@@ -24,7 +24,7 @@ import (
"time"
"github.com/golang/glog"
- "github.com/opensds/opensds/pkg/utils"
+ "github.com/sodafoundation/controller/pkg/utils"
)
const DefaultLogDir = "/var/log/opensds"
diff --git a/pkg/utils/urls/urls.go b/pkg/utils/urls/urls.go
index 1fb1e6c4e..aee2a6c46 100644
--- a/pkg/utils/urls/urls.go
+++ b/pkg/utils/urls/urls.go
@@ -17,7 +17,7 @@ package urls
import (
"strings"
- "github.com/opensds/opensds/pkg/utils/constants"
+ "github.com/sodafoundation/controller/pkg/utils/constants"
)
const (
diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go
index b3a35fe39..64c58776a 100755
--- a/pkg/utils/utils.go
+++ b/pkg/utils/utils.go
@@ -25,7 +25,7 @@ import (
"strings"
"time"
- "github.com/opensds/opensds/pkg/utils/constants"
+ "github.com/sodafoundation/controller/pkg/utils/constants"
log "github.com/golang/glog"
)
diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go
index 1434568cf..30070ecb3 100644
--- a/pkg/utils/utils_test.go
+++ b/pkg/utils/utils_test.go
@@ -19,8 +19,8 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/constants"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils/constants"
)
func TestRvRepElement(t *testing.T) {
diff --git a/test/e2e/connector/connector.go b/test/e2e/connector/connector.go
index 85243f161..8cb5fbebb 100644
--- a/test/e2e/connector/connector.go
+++ b/test/e2e/connector/connector.go
@@ -19,9 +19,9 @@ import (
"fmt"
"os"
- "github.com/opensds/opensds/contrib/connector"
- _ "github.com/opensds/opensds/contrib/connector/iscsi"
- _ "github.com/opensds/opensds/contrib/connector/nvmeof"
+ "github.com/sodafoundation/controller/contrib/connector"
+ _ "github.com/sodafoundation/controller/contrib/connector/iscsi"
+ _ "github.com/sodafoundation/controller/contrib/connector/nvmeof"
)
const (
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 7dc2ed715..c5fb971db 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -23,9 +23,9 @@ import (
"runtime"
"testing"
- "github.com/opensds/opensds/client"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/constants"
+ "github.com/sodafoundation/controller/client"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils/constants"
)
var (
diff --git a/test/e2e/e2ef_test.go b/test/e2e/e2ef_test.go
index e228cee5d..2b8f06e59 100644
--- a/test/e2e/e2ef_test.go
+++ b/test/e2e/e2ef_test.go
@@ -26,9 +26,9 @@ import (
"strings"
"testing"
- "github.com/opensds/opensds/client"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/constants"
+ "github.com/sodafoundation/controller/client"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils/constants"
)
const (
diff --git a/test/e2e/syncclient.go b/test/e2e/syncclient.go
index 27c56c652..09efc1e94 100644
--- a/test/e2e/syncclient.go
+++ b/test/e2e/syncclient.go
@@ -18,9 +18,9 @@ import (
"fmt"
"time"
- "github.com/opensds/opensds/client"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils"
+ "github.com/sodafoundation/controller/client"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils"
)
const (
diff --git a/test/integration/client_test.go b/test/integration/client_test.go
index e7a840f92..146c42707 100755
--- a/test/integration/client_test.go
+++ b/test/integration/client_test.go
@@ -20,10 +20,10 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/client"
- "github.com/opensds/opensds/pkg/model"
- "github.com/opensds/opensds/pkg/utils/constants"
- . "github.com/opensds/opensds/testutils/collection"
+ "github.com/sodafoundation/controller/client"
+ "github.com/sodafoundation/controller/pkg/model"
+ "github.com/sodafoundation/controller/pkg/utils/constants"
+ . "github.com/sodafoundation/controller/testutils/collection"
)
var c *client.Client
diff --git a/test/integration/controller_test.go b/test/integration/controller_test.go
index f2a45a2c5..8c0fca4ce 100755
--- a/test/integration/controller_test.go
+++ b/test/integration/controller_test.go
@@ -20,10 +20,10 @@ import (
"reflect"
"testing"
- "github.com/opensds/opensds/pkg/controller/volume"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- . "github.com/opensds/opensds/testutils/collection"
+ "github.com/sodafoundation/controller/pkg/controller/volume"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
+ . "github.com/sodafoundation/controller/testutils/collection"
)
var (
diff --git a/test/integration/prepare.sh b/test/integration/prepare.sh
index 9a9d7ca12..9162413a5 100755
--- a/test/integration/prepare.sh
+++ b/test/integration/prepare.sh
@@ -19,7 +19,7 @@ TOP_DIR=$(cd $(dirname "$0") && pwd)
# OpenSDS Root directory
OPENSDS_DIR=$(cd $TOP_DIR/../.. && pwd)
-OPENSDS_CONF=/etc/opensds/opensds.conf
+OPENSDS_CONF=/etc/sodafoundation/controller.conf
# Config backend info.
mkdir -p /etc/opensds
diff --git a/testutils/collection/data.go b/testutils/collection/data.go
index 147377827..7e664b86d 100644
--- a/testutils/collection/data.go
+++ b/testutils/collection/data.go
@@ -20,7 +20,7 @@ This package includes a collection of fake stuffs for testing work.
package collection
import (
- "github.com/opensds/opensds/pkg/model"
+ "github.com/sodafoundation/controller/pkg/model"
)
var (
@@ -649,7 +649,7 @@ var (
// The Byte*** variable here is designed for unit test in client package.
// For how to ultilize these pre-assigned variables, please refer to
-// (github.com/opensds/opensds/client/dock_test.go).
+// (github.com/sodafoundation/controller/client/dock_test.go).
var (
ByteProfile = `{
"id": "1106b972-66ef-11e7-b172-db03f3689c9c",
@@ -1122,7 +1122,7 @@ var (
// The StringSlice*** variable here is designed for unit test in etcd package.
// For how to ultilize these pre-assigned variables, please refer to
-// (github.com/opensds/opensds/pkg/db/drivers/etcd/etcd_test.go).
+// (github.com/sodafoundation/controller/pkg/db/drivers/etcd/etcd_test.go).
var (
StringSliceProfiles = []string{
`{
diff --git a/testutils/controller/testing/client.go b/testutils/controller/testing/client.go
index 3f6e86384..b628d9364 100644
--- a/testutils/controller/testing/client.go
+++ b/testutils/controller/testing/client.go
@@ -5,7 +5,7 @@ package mocks
import context "context"
import grpc "google.golang.org/grpc"
import mock "github.com/stretchr/testify/mock"
-import proto "github.com/opensds/opensds/pkg/model/proto"
+import proto "github.com/sodafoundation/controller/pkg/model/proto"
// Client is an autogenerated mock type for the Client type
type Client struct {
diff --git a/testutils/db/fake.go b/testutils/db/fake.go
index 2ec7e1bf7..8b27050f5 100755
--- a/testutils/db/fake.go
+++ b/testutils/db/fake.go
@@ -17,9 +17,9 @@ package db
import (
"errors"
- c "github.com/opensds/opensds/pkg/context"
- "github.com/opensds/opensds/pkg/model"
- . "github.com/opensds/opensds/testutils/collection"
+ c "github.com/sodafoundation/controller/pkg/context"
+ "github.com/sodafoundation/controller/pkg/model"
+ . "github.com/sodafoundation/controller/testutils/collection"
)
// FakeDbClient
diff --git a/testutils/db/testing/client.go b/testutils/db/testing/client.go
index 19b98a729..9b687915a 100644
--- a/testutils/db/testing/client.go
+++ b/testutils/db/testing/client.go
@@ -2,10 +2,10 @@
package mocks
-import context "github.com/opensds/opensds/pkg/context"
+import context "github.com/sodafoundation/controller/pkg/context"
import mock "github.com/stretchr/testify/mock"
-import model "github.com/opensds/opensds/pkg/model"
+import model "github.com/sodafoundation/controller/pkg/model"
// Client is an autogenerated mock type for the Client type
type Client struct {
diff --git a/testutils/dock/testing/client.go b/testutils/dock/testing/client.go
index 246567c8a..257352461 100644
--- a/testutils/dock/testing/client.go
+++ b/testutils/dock/testing/client.go
@@ -5,7 +5,7 @@ package mocks
import context "context"
import grpc "google.golang.org/grpc"
import mock "github.com/stretchr/testify/mock"
-import proto "github.com/opensds/opensds/pkg/model/proto"
+import proto "github.com/sodafoundation/controller/pkg/model/proto"
// Client is an autogenerated mock type for the Client type
type Client struct {
diff --git a/testutils/driver/replication_sample.go b/testutils/driver/replication_sample.go
index f85489a5a..4c0f2601a 100644
--- a/testutils/driver/replication_sample.go
+++ b/testutils/driver/replication_sample.go
@@ -23,9 +23,9 @@ package sample
import (
//"errors"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- . "github.com/opensds/opensds/testutils/collection"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
+ . "github.com/sodafoundation/controller/testutils/collection"
)
// ReplicationDriver
diff --git a/testutils/driver/sample.go b/testutils/driver/sample.go
index 2bd477149..576c20ce9 100644
--- a/testutils/driver/sample.go
+++ b/testutils/driver/sample.go
@@ -23,9 +23,9 @@ package sample
import (
"errors"
- "github.com/opensds/opensds/pkg/model"
- pb "github.com/opensds/opensds/pkg/model/proto"
- . "github.com/opensds/opensds/testutils/collection"
+ "github.com/sodafoundation/controller/pkg/model"
+ pb "github.com/sodafoundation/controller/pkg/model/proto"
+ . "github.com/sodafoundation/controller/testutils/collection"
)
// Driver
diff --git a/testutils/driver/testing/replication_driver.go b/testutils/driver/testing/replication_driver.go
index afe8885c4..782b1e7c9 100644
--- a/testutils/driver/testing/replication_driver.go
+++ b/testutils/driver/testing/replication_driver.go
@@ -3,8 +3,8 @@
package mocks
import mock "github.com/stretchr/testify/mock"
-import model "github.com/opensds/opensds/pkg/model"
-import proto "github.com/opensds/opensds/pkg/model/proto"
+import model "github.com/sodafoundation/controller/pkg/model"
+import proto "github.com/sodafoundation/controller/pkg/model/proto"
// ReplicationDriver is an autogenerated mock type for the ReplicationDriver type
type ReplicationDriver struct {
diff --git a/testutils/driver/testing/volume_driver.go b/testutils/driver/testing/volume_driver.go
index 71d746a6f..477367346 100644
--- a/testutils/driver/testing/volume_driver.go
+++ b/testutils/driver/testing/volume_driver.go
@@ -3,8 +3,8 @@
package mocks
import mock "github.com/stretchr/testify/mock"
-import model "github.com/opensds/opensds/pkg/model"
-import proto "github.com/opensds/opensds/pkg/model/proto"
+import model "github.com/sodafoundation/controller/pkg/model"
+import proto "github.com/sodafoundation/controller/pkg/model/proto"
// VolumeDriver is an autogenerated mock type for the VolumeDriver type
type VolumeDriver struct {