From 0987a28033a836cd38198c78239ca8bf920b4434 Mon Sep 17 00:00:00 2001 From: panda-sheep <59197347+panda-sheep@users.noreply.github.com> Date: Wed, 31 Mar 2021 05:18:01 +0800 Subject: [PATCH] Meta handles leader changes and other errorcodes uniformly --- src/daemons/MetaDaemon.cpp | 4 - src/kvstore/NebulaStore.cpp | 3 +- src/meta/ActiveHostsMan.cpp | 118 ++++-- src/meta/ActiveHostsMan.h | 46 ++- src/meta/MetaServiceUtils.cpp | 141 ------- src/meta/MetaServiceUtils.h | 12 - src/meta/common/MetaCommon.h | 3 + src/meta/processors/BaseProcessor.h | 74 ++-- src/meta/processors/BaseProcessor.inl | 381 +++++++++++++----- src/meta/processors/admin/AdminClient.cpp | 149 ++++--- src/meta/processors/admin/AdminClient.h | 3 +- src/meta/processors/admin/BalancePlan.cpp | 15 +- .../processors/admin/BalanceProcessor.cpp | 31 +- src/meta/processors/admin/BalanceTask.cpp | 10 +- src/meta/processors/admin/Balancer.cpp | 218 ++++++---- src/meta/processors/admin/Balancer.h | 56 +-- .../admin/CreateBackupProcessor.cpp | 81 ++-- .../processors/admin/CreateBackupProcessor.h | 8 +- .../admin/CreateSnapshotProcessor.cpp | 29 +- .../admin/DropSnapshotProcessor.cpp | 26 +- src/meta/processors/admin/HBProcessor.cpp | 29 +- .../admin/ListSnapshotsProcessor.cpp | 23 +- .../processors/admin/RestoreProcessor.cpp | 34 +- src/meta/processors/admin/SnapShot.cpp | 47 ++- src/meta/processors/admin/SnapShot.h | 2 +- .../configMan/GetConfigProcessor.cpp | 43 +- .../processors/configMan/GetConfigProcessor.h | 5 +- .../configMan/ListConfigsProcessor.cpp | 12 +- .../configMan/RegConfigProcessor.cpp | 12 +- .../configMan/SetConfigProcessor.cpp | 9 +- src/meta/processors/customKV/GetProcessor.cpp | 11 +- .../processors/customKV/MultiGetProcessor.cpp | 10 +- .../processors/customKV/ScanProcessor.cpp | 10 +- .../indexMan/CreateEdgeIndexProcessor.cpp | 49 ++- .../indexMan/CreateTagIndexProcessor.cpp | 49 ++- .../indexMan/DropEdgeIndexProcessor.cpp | 27 +- .../indexMan/DropTagIndexProcessor.cpp | 26 +- 
.../indexMan/FTServiceProcessor.cpp | 40 +- .../indexMan/GetEdgeIndexProcessor.cpp | 27 +- .../indexMan/GetTagIndexProcessor.cpp | 29 +- .../indexMan/ListEdgeIndexesProcessor.cpp | 22 +- .../indexMan/ListTagIndexesProcessor.cpp | 20 +- .../processors/jobMan/AdminJobProcessor.cpp | 6 +- src/meta/processors/jobMan/JobDescription.cpp | 19 +- src/meta/processors/jobMan/JobDescription.h | 4 +- src/meta/processors/jobMan/JobManager.cpp | 177 ++++---- src/meta/processors/jobMan/JobManager.h | 8 +- .../jobMan/ListEdgeIndexStatusProcessor.cpp | 24 +- .../jobMan/ListTagIndexStatusProcessor.cpp | 25 +- .../processors/jobMan/MetaJobExecutor.cpp | 33 +- src/meta/processors/jobMan/MetaJobExecutor.h | 4 +- .../processors/jobMan/RebuildJobExecutor.cpp | 8 +- .../processors/jobMan/StatisJobExecutor.cpp | 49 ++- .../processors/jobMan/StatisJobExecutor.h | 6 +- .../listenerMan/ListenerProcessor.cpp | 75 ++-- .../partsMan/CreateSpaceProcessor.cpp | 85 ++-- .../partsMan/CreateSpaceProcessor.h | 2 +- .../partsMan/DropSpaceProcessor.cpp | 63 ++- .../partsMan/GetPartsAllocProcessor.cpp | 14 +- .../processors/partsMan/GetSpaceProcessor.cpp | 29 +- .../partsMan/ListHostsProcessor.cpp | 115 +++--- .../processors/partsMan/ListHostsProcessor.h | 13 +- .../partsMan/ListPartsProcessor.cpp | 78 ++-- .../processors/partsMan/ListPartsProcessor.h | 5 +- .../partsMan/ListSpacesProcessor.cpp | 15 +- .../schemaMan/AlterEdgeProcessor.cpp | 47 ++- .../schemaMan/AlterTagProcessor.cpp | 47 ++- .../schemaMan/CreateEdgeProcessor.cpp | 43 +- .../schemaMan/CreateTagProcessor.cpp | 42 +- .../schemaMan/DropEdgeProcessor.cpp | 55 ++- .../processors/schemaMan/DropEdgeProcessor.h | 3 +- .../processors/schemaMan/DropTagProcessor.cpp | 54 ++- .../processors/schemaMan/DropTagProcessor.h | 3 +- .../processors/schemaMan/GetEdgeProcessor.cpp | 54 ++- .../processors/schemaMan/GetTagProcessor.cpp | 57 +-- .../schemaMan/ListEdgesProcessor.cpp | 15 +- .../schemaMan/ListTagsProcessor.cpp | 16 +- 
.../usersMan/AuthenticationProcessor.cpp | 199 +++++---- .../usersMan/AuthenticationProcessor.h | 4 +- .../processors/zoneMan/AddGroupProcessor.cpp | 35 +- .../processors/zoneMan/AddGroupProcessor.h | 5 +- .../processors/zoneMan/AddZoneProcessor.cpp | 46 ++- .../processors/zoneMan/AddZoneProcessor.h | 2 +- .../processors/zoneMan/DropGroupProcessor.cpp | 40 +- .../processors/zoneMan/DropGroupProcessor.h | 2 +- .../processors/zoneMan/DropZoneProcessor.cpp | 39 +- .../processors/zoneMan/DropZoneProcessor.h | 2 +- .../processors/zoneMan/GetGroupProcessor.cpp | 23 +- .../processors/zoneMan/GetZoneProcessor.cpp | 22 +- .../zoneMan/ListGroupsProcessor.cpp | 12 +- .../processors/zoneMan/ListZonesProcessor.cpp | 12 +- .../zoneMan/UpdateGroupProcessor.cpp | 49 ++- .../zoneMan/UpdateZoneProcessor.cpp | 47 ++- src/meta/test/ActiveHostsManTest.cpp | 37 +- src/meta/test/AdminClientTest.cpp | 8 +- src/meta/test/BalancerTest.cpp | 34 +- src/meta/test/GetStatisTest.cpp | 86 ++-- src/meta/test/GroupZoneTest.cpp | 4 +- src/meta/test/HBProcessorTest.cpp | 10 +- src/meta/test/JobManagerTest.cpp | 34 +- src/meta/test/MetaClientTest.cpp | 4 +- src/meta/test/ProcessorTest.cpp | 6 +- src/meta/test/TestUtils.h | 2 +- 103 files changed, 2442 insertions(+), 1559 deletions(-) diff --git a/src/daemons/MetaDaemon.cpp b/src/daemons/MetaDaemon.cpp index 2d3b4185c..68d6c47e2 100644 --- a/src/daemons/MetaDaemon.cpp +++ b/src/daemons/MetaDaemon.cpp @@ -47,10 +47,8 @@ DEFINE_string(meta_server_addrs, DEFINE_int32(num_io_threads, 16, "Number of IO threads"); DEFINE_int32(meta_http_thread_num, 3, "Number of meta daemon's http thread"); DEFINE_int32(num_worker_threads, 32, "Number of workers"); - DEFINE_string(pid_file, "pids/nebula-metad.pid", "File to hold the process id"); DEFINE_bool(daemonize, true, "Whether run as a daemon process"); -DECLARE_bool(check_leader); static std::unique_ptr gServer; static std::unique_ptr gKVStore; @@ -81,8 +79,6 @@ std::unique_ptr initKV(std::vector p 
FLAGS_num_worker_threads, true /*stats*/)); threadManager->setNamePrefix("executor"); threadManager->start(); - // On metad, we are allowed to read on follower - FLAGS_check_leader = false; nebula::kvstore::KVOptions options; options.dataPaths_ = {FLAGS_data_path}; options.partMan_ = std::move(partMan); diff --git a/src/kvstore/NebulaStore.cpp b/src/kvstore/NebulaStore.cpp index aa1dfa6ec..730751b64 100644 --- a/src/kvstore/NebulaStore.cpp +++ b/src/kvstore/NebulaStore.cpp @@ -19,7 +19,6 @@ DEFINE_string(engine_type, "rocksdb", "rocksdb, memory..."); DEFINE_int32(custom_filter_interval_secs, 24 * 3600, "interval to trigger custom compaction, < 0 means always do default minor compaction"); DEFINE_int32(num_workers, 4, "Number of worker threads"); -DEFINE_bool(check_leader, true, "Check leader or not"); DEFINE_int32(clean_wal_interval_secs, 600, "inerval to trigger clean expired wal"); DEFINE_bool(auto_remove_invalid_space, false, "whether remove data of invalid space when restart"); @@ -1027,7 +1026,7 @@ int32_t NebulaStore::allLeader(std::unordered_map part, bool canReadFromFollower) const { - return !FLAGS_check_leader || canReadFromFollower || (part->isLeader() && part->leaseValid()); + return canReadFromFollower || (part->isLeader() && part->leaseValid()); } void NebulaStore::cleanWAL() { diff --git a/src/meta/ActiveHostsMan.cpp b/src/meta/ActiveHostsMan.cpp index 2e29c7b92..019f108e2 100644 --- a/src/meta/ActiveHostsMan.cpp +++ b/src/meta/ActiveHostsMan.cpp @@ -6,6 +6,7 @@ #include "meta/ActiveHostsMan.h" #include "meta/processors/Common.h" +#include "meta/common/MetaCommon.h" #include "utils/Utils.h" DECLARE_int32(heartbeat_interval_secs); @@ -14,10 +15,10 @@ DECLARE_uint32(expired_time_factor); namespace nebula { namespace meta { -kvstore::ResultCode ActiveHostsMan::updateHostInfo(kvstore::KVStore* kv, - const HostAddr& hostAddr, - const HostInfo& info, - const LeaderParts* leaderParts) { +cpp2::ErrorCode ActiveHostsMan::updateHostInfo(kvstore::KVStore* kv, + 
const HostAddr& hostAddr, + const HostInfo& info, + const LeaderParts* leaderParts) { CHECK_NOTNULL(kv); std::vector data; data.emplace_back(MetaServiceUtils::hostKey(hostAddr.host, hostAddr.port), @@ -35,20 +36,22 @@ kvstore::ResultCode ActiveHostsMan::updateHostInfo(kvstore::KVStore* kv, baton.post(); }); baton.wait(); - return ret; + return MetaCommon::to(ret); } -std::vector ActiveHostsMan::getActiveHosts(kvstore::KVStore* kv, - int32_t expiredTTL, - cpp2::HostRole role) { - std::vector hosts; +ErrorOr> +ActiveHostsMan::getActiveHosts(kvstore::KVStore* kv, int32_t expiredTTL, cpp2::HostRole role) { const auto& prefix = MetaServiceUtils::hostPrefix(); std::unique_ptr iter; auto ret = kv->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); if (ret != kvstore::ResultCode::SUCCEEDED) { - FLOG_ERROR("getActiveHosts failed(%d)", static_cast(ret)); - return hosts; + auto retCode = MetaCommon::to(ret); + LOG(ERROR) << "Failed to getActiveHosts, error " + << static_cast(retCode); + return retCode; } + + std::vector hosts; int64_t threshold = (expiredTTL == 0 ? 
FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor : expiredTTL) * 1000; @@ -67,16 +70,19 @@ std::vector ActiveHostsMan::getActiveHosts(kvstore::KVStore* kv, return hosts; } -std::vector ActiveHostsMan::getActiveHostsInZone(kvstore::KVStore* kv, - const std::string& zoneName, - int32_t expiredTTL) { +ErrorOr> +ActiveHostsMan::getActiveHostsInZone(kvstore::KVStore* kv, + const std::string& zoneName, + int32_t expiredTTL) { std::vector activeHosts; std::string zoneValue; auto zoneKey = MetaServiceUtils::zoneKey(zoneName); auto ret = kv->get(kDefaultSpaceId, kDefaultPartId, zoneKey, &zoneValue); if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get zone " << zoneName << " failed"; - return activeHosts; + auto retCode = MetaCommon::to(ret); + LOG(ERROR) << "Get zone " << zoneName << " failed, error: " + << static_cast(retCode); + return retCode; } auto hosts = MetaServiceUtils::parseZoneHosts(std::move(zoneValue)); @@ -86,12 +92,11 @@ std::vector ActiveHostsMan::getActiveHostsInZone(kvstore::KVStore* kv, expiredTTL) * 1000; for (auto& host : hosts) { auto infoRet = getHostInfo(kv, host); - if (!infoRet.ok()) { - activeHosts.clear(); - return activeHosts; + if (!nebula::ok(infoRet)) { + return nebula::error(infoRet); } - auto info = infoRet.value(); + auto info = nebula::value(infoRet); if (now - info.lastHBTimeInMilliSec_ < threshold) { activeHosts.emplace_back(host.host, host.port); } @@ -99,16 +104,19 @@ std::vector ActiveHostsMan::getActiveHostsInZone(kvstore::KVStore* kv, return activeHosts; } -std::vector ActiveHostsMan::getActiveHostsWithGroup(kvstore::KVStore* kv, - GraphSpaceID spaceId, - int32_t expiredTTL) { +ErrorOr> +ActiveHostsMan::getActiveHostsWithGroup(kvstore::KVStore* kv, + GraphSpaceID spaceId, + int32_t expiredTTL) { std::string spaceValue; std::vector activeHosts; auto spaceKey = MetaServiceUtils::spaceKey(spaceId); auto ret = kv->get(kDefaultSpaceId, kDefaultPartId, spaceKey, &spaceValue); if (ret != 
kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Space " << spaceId << " not exist"; - return activeHosts; + auto retCode = MetaCommon::to(ret); + LOG(ERROR) << "Get space failed, error: " + << static_cast(retCode); + return retCode; } std::string groupValue; @@ -116,22 +124,34 @@ std::vector ActiveHostsMan::getActiveHostsWithGroup(kvstore::KVStore* auto groupKey = MetaServiceUtils::groupKey(*space.group_name_ref()); ret = kv->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get group " << *space.group_name_ref() << " failed"; - return activeHosts; + auto retCode = MetaCommon::to(ret); + LOG(ERROR) << "Get group " << *space.group_name_ref() << " failed, error: " + << static_cast(retCode); + return retCode; } auto zoneNames = MetaServiceUtils::parseZoneNames(std::move(groupValue)); for (const auto& zoneName : zoneNames) { - auto hosts = getActiveHostsInZone(kv, zoneName, expiredTTL); + auto hostsRet = getActiveHostsInZone(kv, zoneName, expiredTTL); + if (!nebula::ok(hostsRet)) { + return nebula::error(hostsRet); + } + auto hosts = nebula::value(hostsRet); activeHosts.insert(activeHosts.end(), hosts.begin(), hosts.end()); } return activeHosts; } -std::vector ActiveHostsMan::getActiveAdminHosts(kvstore::KVStore* kv, - int32_t expiredTTL, - cpp2::HostRole role) { - auto hosts = getActiveHosts(kv, expiredTTL, role); +ErrorOr> +ActiveHostsMan::getActiveAdminHosts(kvstore::KVStore* kv, + int32_t expiredTTL, + cpp2::HostRole role) { + auto hostsRet = getActiveHosts(kv, expiredTTL, role); + if (!nebula::ok(hostsRet)) { + return nebula::error(hostsRet); + } + auto hosts = nebula::value(hostsRet); + std::vector adminHosts(hosts.size()); std::transform(hosts.begin(), hosts.end(), adminHosts.begin(), [](const auto& h) { return Utils::getAdminAddrFromStoreAddr(h); @@ -139,23 +159,31 @@ std::vector ActiveHostsMan::getActiveAdminHosts(kvstore::KVStore* kv, return adminHosts; } -bool 
ActiveHostsMan::isLived(kvstore::KVStore* kv, const HostAddr& host) { - auto activeHosts = getActiveHosts(kv); +ErrorOr ActiveHostsMan::isLived(kvstore::KVStore* kv, const HostAddr& host) { + auto activeHostsRet = getActiveHosts(kv); + if (!nebula::ok(activeHostsRet)) { + return nebula::error(activeHostsRet); + } + auto activeHosts = nebula::value(activeHostsRet); + return std::find(activeHosts.begin(), activeHosts.end(), host) != activeHosts.end(); } -StatusOr ActiveHostsMan::getHostInfo(kvstore::KVStore* kv, const HostAddr& host) { +ErrorOr +ActiveHostsMan::getHostInfo(kvstore::KVStore* kv, const HostAddr& host) { auto hostKey = MetaServiceUtils::hostKey(host.host, host.port); std::string hostValue; auto ret = kv->get(kDefaultSpaceId, kDefaultPartId, hostKey, &hostValue); if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get host info " << host << " failed"; - return Status::Error("Get host info failed"); + auto retCode = MetaCommon::to(ret); + LOG(ERROR) << "Get host info " << host << " failed, error: " + << static_cast(retCode); + return retCode; } return HostInfo::decode(hostValue); } -kvstore::ResultCode LastUpdateTimeMan::update(kvstore::KVStore* kv, const int64_t timeInMilliSec) { +cpp2::ErrorCode LastUpdateTimeMan::update(kvstore::KVStore* kv, const int64_t timeInMilliSec) { CHECK_NOTNULL(kv); std::vector data; data.emplace_back(MetaServiceUtils::lastUpdateTimeKey(), @@ -170,18 +198,22 @@ kvstore::ResultCode LastUpdateTimeMan::update(kvstore::KVStore* kv, const int64_ baton.post(); }); baton.wait(); - return kv->sync(kDefaultSpaceId, kDefaultPartId); + ret = kv->sync(kDefaultSpaceId, kDefaultPartId); + return MetaCommon::to(ret); } -int64_t LastUpdateTimeMan::get(kvstore::KVStore* kv) { +ErrorOr LastUpdateTimeMan::get(kvstore::KVStore* kv) { CHECK_NOTNULL(kv); auto key = MetaServiceUtils::lastUpdateTimeKey(); std::string val; auto ret = kv->get(kDefaultSpaceId, kDefaultPartId, key, &val); - if (ret == kvstore::ResultCode::SUCCEEDED) { - return 
*reinterpret_cast(val.data()); + if (ret != kvstore::ResultCode::SUCCEEDED) { + auto retCode = MetaCommon::to(ret); + LOG(ERROR) << "Get last update time failed, error: " + << static_cast(retCode); + return retCode; } - return 0; + return *reinterpret_cast(val.data()); } } // namespace meta diff --git a/src/meta/ActiveHostsMan.h b/src/meta/ActiveHostsMan.h index 94ddfd3b4..ff9a106f9 100644 --- a/src/meta/ActiveHostsMan.h +++ b/src/meta/ActiveHostsMan.h @@ -108,42 +108,48 @@ class ActiveHostsMan final { public: ~ActiveHostsMan() = default; - static kvstore::ResultCode updateHostInfo(kvstore::KVStore* kv, - const HostAddr& hostAddr, - const HostInfo& info, - const LeaderParts* leaderParts = nullptr); + static cpp2::ErrorCode updateHostInfo(kvstore::KVStore* kv, + const HostAddr& hostAddr, + const HostInfo& info, + const LeaderParts* leaderParts = nullptr); - static std::vector getActiveHosts(kvstore::KVStore* kv, - int32_t expiredTTL = 0, - cpp2::HostRole role = cpp2::HostRole::STORAGE); + static ErrorOr> + getActiveHosts(kvstore::KVStore* kv, + int32_t expiredTTL = 0, + cpp2::HostRole role = cpp2::HostRole::STORAGE); - static std::vector getActiveHostsInZone(kvstore::KVStore* kv, - const std::string& zoneName, - int32_t expiredTTL = 0); + static ErrorOr> + getActiveHostsInZone(kvstore::KVStore* kv, + const std::string& zoneName, + int32_t expiredTTL = 0); - static std::vector getActiveHostsWithGroup(kvstore::KVStore* kv, - GraphSpaceID spaceId, - int32_t expiredTTL = 0); + static ErrorOr> + getActiveHostsWithGroup(kvstore::KVStore* kv, + GraphSpaceID spaceId, + int32_t expiredTTL = 0); - static std::vector getActiveAdminHosts(kvstore::KVStore* kv, - int32_t expiredTTL = 0, - cpp2::HostRole role = cpp2::HostRole::STORAGE); + static ErrorOr> + getActiveAdminHosts(kvstore::KVStore* kv, + int32_t expiredTTL = 0, + cpp2::HostRole role = cpp2::HostRole::STORAGE); - static bool isLived(kvstore::KVStore* kv, const HostAddr& host); + static ErrorOr isLived(kvstore::KVStore* 
kv, const HostAddr& host); - static StatusOr getHostInfo(kvstore::KVStore* kv, const HostAddr& host); + static ErrorOr + getHostInfo(kvstore::KVStore* kv, const HostAddr& host); protected: ActiveHostsMan() = default; }; + class LastUpdateTimeMan final { public: ~LastUpdateTimeMan() = default; - static kvstore::ResultCode update(kvstore::KVStore* kv, const int64_t timeInMilliSec); + static cpp2::ErrorCode update(kvstore::KVStore* kv, const int64_t timeInMilliSec); - static int64_t get(kvstore::KVStore* kv); + static ErrorOr get(kvstore::KVStore* kv); protected: LastUpdateTimeMan() = default; diff --git a/src/meta/MetaServiceUtils.cpp b/src/meta/MetaServiceUtils.cpp index 6b4000938..706014dd8 100644 --- a/src/meta/MetaServiceUtils.cpp +++ b/src/meta/MetaServiceUtils.cpp @@ -905,26 +905,6 @@ std::string MetaServiceUtils::genTimestampStr() { return ch; } -folly::Optional MetaServiceUtils::isIndexRebuilding(kvstore::KVStore* kvstore) { - folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - auto prefix = rebuildIndexStatusPrefix(); - std::unique_ptr iter; - auto ret = kvstore->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "prefix index rebuilding state failed, result code: " << ret; - return folly::none; - } - - while (iter->valid()) { - if (iter->val() == "RUNNING") { - return true; - } - iter->next(); - } - - return false; -} - std::function MetaServiceUtils::spaceFilter(const std::unordered_set& spaces, std::function parseSpace) { @@ -1036,127 +1016,6 @@ folly::Optional> MetaServiceUtils::backup( return files; } -bool MetaServiceUtils::replaceHostInPartition(kvstore::KVStore* kvstore, - const HostAddr& ipv4From, - const HostAddr& ipv4To, - bool direct) { - folly::SharedMutex::WriteHolder wHolder(LockUtils::spaceLock()); - const auto& spacePrefix = MetaServiceUtils::spacePrefix(); - std::unique_ptr iter; - auto kvRet = kvstore->prefix(kDefaultSpaceId, kDefaultPartId, spacePrefix, 
&iter); - if (kvRet != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << folly::stringPrintf("can't get space prefix=%s", spacePrefix.c_str()); - return false; - } - - std::vector allSpaceId; - while (iter->valid()) { - auto spaceId = MetaServiceUtils::spaceId(iter->key()); - allSpaceId.emplace_back(spaceId); - iter->next(); - } - LOG(INFO) << "allSpaceId.size()=" << allSpaceId.size(); - - std::vector data; - - for (const auto& spaceId : allSpaceId) { - const auto& partPrefix = MetaServiceUtils::partPrefix(spaceId); - kvRet = kvstore->prefix(kDefaultSpaceId, kDefaultPartId, partPrefix, &iter); - if (kvRet != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << folly::stringPrintf("can't get partPrefix=%s", partPrefix.c_str()); - return false; - } - - while (iter->valid()) { - bool needUpdate = false; - auto partHosts = MetaServiceUtils::parsePartVal(iter->val()); - for (auto& host : partHosts) { - if (host == ipv4From) { - needUpdate = true; - host = ipv4To; - } - } - if (needUpdate) { - data.emplace_back(iter->key(), MetaServiceUtils::partVal(partHosts)); - } - iter->next(); - } - } - - if (direct) { - kvRet = kvstore->multiPutWithoutReplicator(kDefaultSpaceId, std::move(data)); - if (kvRet != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "multiPutWithoutReplicator failed kvRet=" << kvRet; - return false; - } - return true; - } - bool updateSucceed{false}; - folly::Baton baton; - kvstore->asyncMultiPut( - kDefaultSpaceId, kDefaultPartId, std::move(data), [&](kvstore::ResultCode code) { - updateSucceed = (code == kvstore::ResultCode::SUCCEEDED); - LOG(ERROR) << "multiPutWithoutReplicator failed kvRet=" << code; - baton.post(); - }); - baton.wait(); - - return updateSucceed; -} - -bool MetaServiceUtils::replaceHostInZone(kvstore::KVStore* kvstore, - const HostAddr& ipv4From, - const HostAddr& ipv4To, - bool direct) { - folly::SharedMutex::WriteHolder wHolder(LockUtils::spaceLock()); - const auto& zonePrefix = MetaServiceUtils::zonePrefix(); - std::unique_ptr iter; 
- auto kvRet = kvstore->prefix(kDefaultSpaceId, kDefaultPartId, zonePrefix, &iter); - if (kvRet != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << folly::stringPrintf("can't get zonePrefix=%s", zonePrefix.c_str()); - return false; - } - - std::vector data; - - while (iter->valid()) { - bool needUpdate = false; - auto zoneName = parseZoneName(iter->key()); - auto hosts = parseZoneHosts(iter->val()); - std::vector DesHosts; - for (auto& host : hosts) { - if (host == ipv4From) { - needUpdate = true; - host = ipv4To; - } - } - if (needUpdate) { - data.emplace_back(iter->key(), MetaServiceUtils::zoneVal(hosts)); - } - iter->next(); - } - - if (direct) { - kvRet = kvstore->multiPutWithoutReplicator(kDefaultSpaceId, std::move(data)); - if (kvRet != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "multiPutWithoutReplicator failed kvRet=" << kvRet; - return false; - } - return true; - } - bool updateSucceed{false}; - folly::Baton baton; - kvstore->asyncMultiPut( - kDefaultSpaceId, kDefaultPartId, std::move(data), [&](kvstore::ResultCode code) { - updateSucceed = (code == kvstore::ResultCode::SUCCEEDED); - LOG(ERROR) << "multiPutWithoutReplicator failed kvRet=" << code; - baton.post(); - }); - baton.wait(); - - return updateSucceed; -} - std::string MetaServiceUtils::balanceTaskKey(BalanceID balanceId, GraphSpaceID spaceId, PartitionID partId, diff --git a/src/meta/MetaServiceUtils.h b/src/meta/MetaServiceUtils.h index 402c92474..31f071bda 100644 --- a/src/meta/MetaServiceUtils.h +++ b/src/meta/MetaServiceUtils.h @@ -309,8 +309,6 @@ class MetaServiceUtils final { static std::string genTimestampStr(); - static folly::Optional isIndexRebuilding(kvstore::KVStore*); - static GraphSpaceID parseEdgesKeySpaceID(folly::StringPiece key); static GraphSpaceID parseTagsKeySpaceID(folly::StringPiece key); static GraphSpaceID parseIndexesKeySpaceID(folly::StringPiece key); @@ -318,16 +316,6 @@ class MetaServiceUtils final { static GraphSpaceID parseIndexKeySpaceID(folly::StringPiece 
key); static GraphSpaceID parseDefaultKeySpaceID(folly::StringPiece key); - // A direct value of true means that data will not be written to follow via the raft protocol, - // but will be written directly to local disk - static bool replaceHostInPartition(kvstore::KVStore* kvstore, - const HostAddr& ipv4From, - const HostAddr& ipv4To, - bool direct = false); - static bool replaceHostInZone(kvstore::KVStore* kvstore, - const HostAddr& ipv4From, - const HostAddr& ipv4To, - bool direct = false); // backup static ErrorOr> backupIndexTable( kvstore::KVStore* kvstore, diff --git a/src/meta/common/MetaCommon.h b/src/meta/common/MetaCommon.h index a0e1fd978..9e88a6cad 100644 --- a/src/meta/common/MetaCommon.h +++ b/src/meta/common/MetaCommon.h @@ -28,11 +28,14 @@ class MetaCommon final { return false; } + // todo unify cpp2::ErrorCode and ResultCode static cpp2::ErrorCode to(kvstore::ResultCode code) { switch (code) { case kvstore::ResultCode::SUCCEEDED: return cpp2::ErrorCode::SUCCEEDED; + case kvstore::ResultCode::ERR_SPACE_NOT_FOUND: case kvstore::ResultCode::ERR_KEY_NOT_FOUND: + case kvstore::ResultCode::ERR_PART_NOT_FOUND: return cpp2::ErrorCode::E_NOT_FOUND; case kvstore::ResultCode::ERR_LEADER_CHANGED: return cpp2::ErrorCode::E_LEADER_CHANGED; diff --git a/src/meta/processors/BaseProcessor.h b/src/meta/processors/BaseProcessor.h index 45570c743..f749cb284 100644 --- a/src/meta/processors/BaseProcessor.h +++ b/src/meta/processors/BaseProcessor.h @@ -10,7 +10,6 @@ #include "common/base/Base.h" #include "common/charset/Charset.h" #include "common/interface/gen-cpp2/storage_types.h" -#include "common/base/StatusOr.h" #include "common/time/Duration.h" #include "common/network/NetworkUtils.h" #include @@ -31,8 +30,9 @@ using FieldType = std::pair; using SignType = storage::cpp2::EngineSignType; #define CHECK_SPACE_ID_AND_RETURN(spaceID) \ - if (spaceExist(spaceID) == Status::SpaceNotFound()) { \ - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); \ + auto retSpace = 
spaceExist(spaceID); \ + if (retSpace != cpp2::ErrorCode::SUCCEEDED) { \ + handleErrorCode(retSpace); \ onFinished(); \ return; \ } @@ -119,17 +119,19 @@ class BaseProcessor { * */ void doPut(std::vector data); - StatusOr> doPrefix(const std::string& key); + ErrorOr> + doPrefix(const std::string& key); /** * General get function. * */ - StatusOr doGet(const std::string& key); + ErrorOr doGet(const std::string& key); /** * General multi get function. * */ - StatusOr> doMultiGet(const std::vector& keys); + ErrorOr> + doMultiGet(const std::vector& keys); /** * General remove function. @@ -144,8 +146,8 @@ class BaseProcessor { /** * Scan keys from start to end, doesn't contain end. * */ - StatusOr> doScan(const std::string& start, - const std::string& end); + ErrorOr> + doScan(const std::string& start, const std::string& end); /** * General multi remove function. **/ @@ -154,7 +156,7 @@ class BaseProcessor { /** * Get all hosts * */ - StatusOr> allHosts(); + ErrorOr> allHosts(); /** * Get one auto-increment Id. @@ -164,32 +166,32 @@ class BaseProcessor { /** * Check spaceId exist or not. * */ - Status spaceExist(GraphSpaceID spaceId); + cpp2::ErrorCode spaceExist(GraphSpaceID spaceId); /** * Check user exist or not. **/ - Status userExist(const std::string& account); + cpp2::ErrorCode userExist(const std::string& account); /** * Check host has been registered or not. * */ - Status hostExist(const std::string& hostKey); + cpp2::ErrorCode hostExist(const std::string& hostKey); /** * Return the spaceId for name. * */ - StatusOr getSpaceId(const std::string& name); + ErrorOr getSpaceId(const std::string& name); /** * Return the tagId for name. */ - StatusOr getTagId(GraphSpaceID spaceId, const std::string& name); + ErrorOr getTagId(GraphSpaceID spaceId, const std::string& name); /** * Fetch the latest version tag's schema. 
*/ - StatusOr + ErrorOr getLatestTagSchema(GraphSpaceID spaceId, const TagID tagId); /** @@ -200,19 +202,21 @@ class BaseProcessor { /** * Return the edgeType for name. */ - StatusOr getEdgeType(GraphSpaceID spaceId, const std::string& name); + ErrorOr getEdgeType(GraphSpaceID spaceId, const std::string& name); /** * Fetch the latest version edge's schema. */ - StatusOr + ErrorOr getLatestEdgeSchema(GraphSpaceID spaceId, const EdgeType edgeType); - StatusOr getIndexID(GraphSpaceID spaceId, const std::string& indexName); + ErrorOr + getIndexID(GraphSpaceID spaceId, const std::string& indexName); - bool checkPassword(const std::string& account, const std::string& password); + ErrorOr + checkPassword(const std::string& account, const std::string& password); - kvstore::ResultCode doSyncPut(std::vector data); + cpp2::ErrorCode doSyncPut(std::vector data); void doSyncPutAndUpdate(std::vector data); @@ -224,23 +228,37 @@ class BaseProcessor { cpp2::ErrorCode indexCheck(const std::vector& items, const std::vector& alterItems); - StatusOr> + ErrorOr> getIndexes(GraphSpaceID spaceId, int32_t tagOrEdge); bool checkIndexExist(const std::vector& fields, const cpp2::IndexItem& item); - StatusOr getGroupId(const std::string& groupName); + ErrorOr getGroupId(const std::string& groupName); - StatusOr getZoneId(const std::string& zoneName); + ErrorOr getZoneId(const std::string& zoneName); - Status listenerExist(GraphSpaceID space, cpp2::ListenerType type); + cpp2::ErrorCode listenerExist(GraphSpaceID space, cpp2::ListenerType type); + + ErrorOr isIndexRebuilding(); + + // A direct value of true means that data will not be written to follow via the raft protocol, + // but will be written directly to local disk + ErrorOr + replaceHostInPartition(const HostAddr& ipv4From, + const HostAddr& ipv4To, + bool direct = false); + + ErrorOr + replaceHostInZone(const HostAddr& ipv4From, + const HostAddr& ipv4To, + bool direct = false); protected: - kvstore::KVStore* kvstore_ = nullptr; - RESP 
resp_; - folly::Promise promise_; - time::Duration duration_; + kvstore::KVStore* kvstore_ = nullptr; + RESP resp_; + folly::Promise promise_; + time::Duration duration_; }; } // namespace meta diff --git a/src/meta/processors/BaseProcessor.inl b/src/meta/processors/BaseProcessor.inl index 531a76d75..d134b0100 100644 --- a/src/meta/processors/BaseProcessor.inl +++ b/src/meta/processors/BaseProcessor.inl @@ -25,39 +25,40 @@ void BaseProcessor::doPut(std::vector data) { template -StatusOr> +ErrorOr> BaseProcessor::doPrefix(const std::string& key) { std::unique_ptr iter; auto code = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, key, &iter); if (code != kvstore::ResultCode::SUCCEEDED) { - return Status::Error("Prefix Failed"); + VLOG(2) << "Prefix Failed"; + return MetaCommon::to(code); } return iter; } template -StatusOr BaseProcessor::doGet(const std::string& key) { +ErrorOr +BaseProcessor::doGet(const std::string& key) { std::string value; auto code = kvstore_->get(kDefaultSpaceId, kDefaultPartId, key, &value); - switch (code) { - case kvstore::ResultCode::SUCCEEDED: - return value; - case kvstore::ResultCode::ERR_KEY_NOT_FOUND: - return Status::Error("Key Not Found"); - default: - return Status::Error("Get Failed"); + if (code != kvstore::ResultCode::SUCCEEDED) { + VLOG(2) << "Get Failed"; + return MetaCommon::to(code); } + return value; } template -StatusOr> +ErrorOr> BaseProcessor::doMultiGet(const std::vector& keys) { std::vector values; auto ret = kvstore_->multiGet(kDefaultSpaceId, kDefaultPartId, keys, &values); - if (ret.first != kvstore::ResultCode::SUCCEEDED) { - return Status::Error("MultiGet Failed"); + auto code = ret.first; + if (code != kvstore::ResultCode::SUCCEEDED) { + VLOG(2) << "MultiGet Failed"; + return MetaCommon::to(code); } return values; } @@ -111,12 +112,13 @@ void BaseProcessor::doRemoveRange(const std::string& start, template -StatusOr> BaseProcessor::doScan(const std::string& start, - const std::string& end) { +ErrorOr> 
+BaseProcessor::doScan(const std::string& start, const std::string& end) { std::unique_ptr iter; auto code = kvstore_->range(kDefaultSpaceId, kDefaultPartId, start, end, &iter); if (code != kvstore::ResultCode::SUCCEEDED) { - return Status::Error("Scan Failed"); + VLOG(2) << "Scan Failed"; + return MetaCommon::to(code); } std::vector values; @@ -129,13 +131,14 @@ StatusOr> BaseProcessor::doScan(const std::string template -StatusOr> BaseProcessor::allHosts() { +ErrorOr> BaseProcessor::allHosts() { std::vector hosts; const auto& prefix = MetaServiceUtils::hostPrefix(); std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - return Status::Error("Can't find any hosts"); + auto code = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); + if (code != kvstore::ResultCode::SUCCEEDED) { + VLOG(2) << "Can't find any hosts"; + return MetaCommon::to(code); } while (iter->valid()) { @@ -186,98 +189,110 @@ ErrorOr BaseProcessor::autoIncrementId() { template -Status BaseProcessor::spaceExist(GraphSpaceID spaceId) { +cpp2::ErrorCode BaseProcessor::spaceExist(GraphSpaceID spaceId) { folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); auto spaceKey = MetaServiceUtils::spaceKey(spaceId); auto ret = doGet(std::move(spaceKey)); - if (ret.ok()) { - return Status::OK(); + if (nebula::ok(ret)) { + return cpp2::ErrorCode::SUCCEEDED; } - return Status::SpaceNotFound(); + return nebula::error(ret); } template -Status BaseProcessor::userExist(const std::string& account) { +cpp2::ErrorCode BaseProcessor::userExist(const std::string& account) { auto userKey = MetaServiceUtils::userKey(account); auto ret = doGet(std::move(userKey)); - if (ret.ok()) { - return Status::OK(); + if (nebula::ok(ret)) { + return cpp2::ErrorCode::SUCCEEDED; } - return Status::UserNotFound(); + return nebula::error(ret); } + template -Status BaseProcessor::hostExist(const std::string& hostKey) { 
+cpp2::ErrorCode BaseProcessor::hostExist(const std::string& hostKey) { auto ret = doGet(hostKey); - if (ret.ok()) { - return Status::OK(); + if (nebula::ok(ret)) { + return cpp2::ErrorCode::SUCCEEDED; } - return Status::HostNotFound(); + return nebula::error(ret); } template -StatusOr BaseProcessor::getSpaceId(const std::string& name) { +ErrorOr BaseProcessor::getSpaceId(const std::string& name) { auto indexKey = MetaServiceUtils::indexSpaceKey(name); auto ret = doGet(indexKey); - if (ret.ok()) { - return *reinterpret_cast(ret.value().c_str()); + if (nebula::ok(ret)) { + return *reinterpret_cast(nebula::value(ret).c_str()); } - return Status::SpaceNotFound(folly::stringPrintf("Space %s not found", name.c_str())); + return nebula::error(ret); } template -StatusOr BaseProcessor::getTagId(GraphSpaceID spaceId, const std::string& name) { +ErrorOr +BaseProcessor::getTagId(GraphSpaceID spaceId, const std::string& name) { auto indexKey = MetaServiceUtils::indexTagKey(spaceId, name); std::string val; auto ret = doGet(std::move(indexKey)); - if (ret.ok()) { - return *reinterpret_cast(ret.value().c_str()); + if (nebula::ok(ret)) { + return *reinterpret_cast(nebula::value(ret).c_str()); } - return Status::TagNotFound(folly::stringPrintf("Tag %s not found", name.c_str())); + return nebula::error(ret); } template -StatusOr BaseProcessor::getEdgeType(GraphSpaceID spaceId, - const std::string& name) { +ErrorOr +BaseProcessor::getEdgeType(GraphSpaceID spaceId, const std::string& name) { auto indexKey = MetaServiceUtils::indexEdgeKey(spaceId, name); auto ret = doGet(std::move(indexKey)); - if (ret.ok()) { - return *reinterpret_cast(ret.value().c_str()); + if (nebula::ok(ret)) { + return *reinterpret_cast(nebula::value(ret).c_str()); } - return Status::EdgeNotFound(folly::stringPrintf("Edge %s not found", name.c_str())); + return nebula::error(ret); } template -StatusOr +ErrorOr BaseProcessor::getLatestTagSchema(GraphSpaceID spaceId, const TagID tagId) { - auto key = 
MetaServiceUtils::schemaTagPrefix(spaceId, tagId); + const auto& key = MetaServiceUtils::schemaTagPrefix(spaceId, tagId); auto ret = doPrefix(key); - if (!ret.ok()) { - LOG(ERROR) << "Tag Prefix " << key << " not found"; - return Status::Error(folly::stringPrintf("Tag Prefix %s not found", key.c_str())); + if (!nebula::ok(ret)) { + LOG(ERROR) << "Tag Prefix " << key << " failed"; + return nebula::error(ret); } - auto iter = ret.value().get(); - return MetaServiceUtils::parseSchema(iter->val()); + auto iter = nebula::value(ret).get(); + if (iter->valid()) { + return MetaServiceUtils::parseSchema(iter->val()); + } else { + LOG(ERROR) << "Tag Prefix " << key << " not found"; + return cpp2::ErrorCode::E_NOT_FOUND; + } } template -StatusOr +ErrorOr BaseProcessor::getLatestEdgeSchema(GraphSpaceID spaceId, const EdgeType edgeType) { - auto key = MetaServiceUtils::schemaEdgePrefix(spaceId, edgeType); + const auto& key = MetaServiceUtils::schemaEdgePrefix(spaceId, edgeType); auto ret = doPrefix(key); - if (!ret.ok()) { - LOG(ERROR) << "Edge Prefix " << key << " not found"; - return Status::Error(folly::stringPrintf("Edge Prefix %s not found", key.c_str())); + if (!nebula::ok(ret)) { + LOG(ERROR) << "Edge Prefix " << key << " failed"; + return nebula::error(ret); } - auto iter = ret.value().get(); - return MetaServiceUtils::parseSchema(iter->val()); + auto iter = nebula::value(ret).get(); + if (iter->valid()) { + return MetaServiceUtils::parseSchema(iter->val()); + } else { + LOG(ERROR) << "Edge Prefix " << key << " not found"; + return cpp2::ErrorCode::E_NOT_FOUND; + } } @@ -292,25 +307,31 @@ bool BaseProcessor::tagOrEdgeHasTTL(const cpp2::Schema& latestSchema) { template -StatusOr +ErrorOr BaseProcessor::getIndexID(GraphSpaceID spaceId, const std::string& indexName) { auto indexKey = MetaServiceUtils::indexIndexKey(spaceId, indexName); auto ret = doGet(indexKey); - if (ret.ok()) { - return *reinterpret_cast(ret.value().c_str()); + if (nebula::ok(ret)) { + return 
*reinterpret_cast(nebula::value(ret).c_str()); } - return Status::IndexNotFound(folly::stringPrintf("Index %s not found", indexName.c_str())); + return nebula::error(ret); } + template -bool BaseProcessor::checkPassword(const std::string& account, const std::string& password) { +ErrorOr +BaseProcessor::checkPassword(const std::string& account, const std::string& password) { auto userKey = MetaServiceUtils::userKey(account); auto ret = doGet(userKey); - return MetaServiceUtils::parseUserPwd(ret.value()) == password; + if (nebula::ok(ret)) { + return MetaServiceUtils::parseUserPwd(nebula::value(ret)) == password; + } + return nebula::error(ret); } + template -kvstore::ResultCode BaseProcessor::doSyncPut(std::vector data) { +cpp2::ErrorCode BaseProcessor::doSyncPut(std::vector data) { folly::Baton baton; auto ret = kvstore::ResultCode::SUCCEEDED; kvstore_->asyncMultiPut(kDefaultSpaceId, @@ -324,9 +345,10 @@ kvstore::ResultCode BaseProcessor::doSyncPut(std::vector data baton.post(); }); baton.wait(); - return ret; + return MetaCommon::to(ret); } + template void BaseProcessor::doSyncPutAndUpdate(std::vector data) { folly::Baton baton; @@ -347,11 +369,12 @@ void BaseProcessor::doSyncPutAndUpdate(std::vector data) { this->onFinished(); return; } - ret = LastUpdateTimeMan::update(kvstore_, time::WallClock::fastNowInMilliSec()); - this->handleErrorCode(MetaCommon::to(ret)); + auto retCode = LastUpdateTimeMan::update(kvstore_, time::WallClock::fastNowInMilliSec()); + this->handleErrorCode(retCode); this->onFinished(); } + template void BaseProcessor::doSyncMultiRemoveAndUpdate(std::vector keys) { folly::Baton baton; @@ -372,22 +395,26 @@ void BaseProcessor::doSyncMultiRemoveAndUpdate(std::vector ke this->onFinished(); return; } - ret = LastUpdateTimeMan::update(kvstore_, time::WallClock::fastNowInMilliSec()); - this->handleErrorCode(MetaCommon::to(ret)); + auto retCode = LastUpdateTimeMan::update(kvstore_, time::WallClock::fastNowInMilliSec()); + 
this->handleErrorCode(retCode); this->onFinished(); } + template -StatusOr> -BaseProcessor::getIndexes(GraphSpaceID spaceId, - int32_t tagOrEdge) { +ErrorOr> +BaseProcessor::getIndexes(GraphSpaceID spaceId, int32_t tagOrEdge) { std::vector items; - auto indexPrefix = MetaServiceUtils::indexPrefix(spaceId); + const auto& indexPrefix = MetaServiceUtils::indexPrefix(spaceId); auto iterRet = doPrefix(indexPrefix); - if (!iterRet.ok()) { - return iterRet.status(); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "Tag or edge index prefix failed, error :" + << static_cast(retCode); + return retCode; } - auto indexIter = iterRet.value().get(); + auto indexIter = nebula::value(iterRet).get(); + while (indexIter->valid()) { auto item = MetaServiceUtils::parseIndex(indexIter->val()); if (item.get_schema_id().getType() == cpp2::SchemaID::Type::tag_id && @@ -402,6 +429,7 @@ BaseProcessor::getIndexes(GraphSpaceID spaceId, return items; } + template cpp2::ErrorCode BaseProcessor::indexCheck(const std::vector& items, @@ -429,6 +457,7 @@ BaseProcessor::indexCheck(const std::vector& items, return cpp2::ErrorCode::SUCCEEDED; } + template bool BaseProcessor::checkIndexExist(const std::vector& fields, const cpp2::IndexItem& item) { @@ -450,38 +479,202 @@ bool BaseProcessor::checkIndexExist(const std::vector return false; } + template -StatusOr BaseProcessor::getGroupId(const std::string& groupName) { +ErrorOr BaseProcessor::getGroupId(const std::string& groupName) { auto indexKey = MetaServiceUtils::indexGroupKey(groupName); auto ret = doGet(std::move(indexKey)); - if (ret.ok()) { - return *reinterpret_cast(ret.value().c_str()); + if (nebula::ok(ret)) { + return *reinterpret_cast(nebula::value(ret).c_str()); } - return Status::GroupNotFound(folly::stringPrintf("Group %s not found", groupName.c_str())); + return nebula::error(ret); } + template -StatusOr BaseProcessor::getZoneId(const std::string& zoneName) { +ErrorOr BaseProcessor::getZoneId(const 
std::string& zoneName) { auto indexKey = MetaServiceUtils::indexZoneKey(zoneName); auto ret = doGet(std::move(indexKey)); - if (ret.ok()) { - return *reinterpret_cast(ret.value().c_str()); + if (nebula::ok(ret)) { + return *reinterpret_cast(nebula::value(ret).c_str()); } - return Status::ZoneNotFound(folly::stringPrintf("Zone %s not found", zoneName.c_str())); + return nebula::error(ret); } + template -Status BaseProcessor::listenerExist(GraphSpaceID space, cpp2::ListenerType type) { +cpp2::ErrorCode BaseProcessor::listenerExist(GraphSpaceID space, cpp2::ListenerType type) { folly::SharedMutex::ReadHolder rHolder(LockUtils::listenerLock()); - auto prefix = MetaServiceUtils::listenerPrefix(space, type); - auto iterRet = doPrefix(prefix); - if (!iterRet.ok()) { - return iterRet.status(); + const auto& prefix = MetaServiceUtils::listenerPrefix(space, type); + auto ret = doPrefix(prefix); + if (!nebula::ok(ret)) { + return nebula::error(ret); + } + + auto iterRet = nebula::value(ret).get(); + if (!iterRet->valid()) { + return cpp2::ErrorCode::E_NOT_FOUND; + } + return cpp2::ErrorCode::SUCCEEDED; +} + + +template +ErrorOr BaseProcessor::isIndexRebuilding() { + folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); + const auto& prefix = MetaServiceUtils::rebuildIndexStatusPrefix(); + auto ret = doPrefix(prefix); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "Prefix index rebuilding state failed, result code: " + << static_cast(retCode);; + return retCode; } - if (!iterRet.value().get()->valid()) { - return Status::ListenerNotFound(); + + auto iter = nebula::value(ret).get(); + while (iter->valid()) { + if (iter->val() == "RUNNING") { + return true; + } + iter->next(); } - return Status::OK(); + + return false; } + + +template +ErrorOr +BaseProcessor::replaceHostInPartition(const HostAddr& ipv4From, + const HostAddr& ipv4To, + bool direct) { + folly::SharedMutex::WriteHolder wHolder(LockUtils::spaceLock()); + const auto& 
spacePrefix = MetaServiceUtils::spacePrefix(); + auto iterRet = doPrefix(spacePrefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "Space prefix failed, error: " << static_cast(retCode); + return retCode; + } + auto iter = nebula::value(iterRet).get(); + + std::vector allSpaceId; + while (iter->valid()) { + auto spaceId = MetaServiceUtils::spaceId(iter->key()); + allSpaceId.emplace_back(spaceId); + iter->next(); + } + LOG(INFO) << "allSpaceId.size()=" << allSpaceId.size(); + + std::vector data; + + for (const auto& spaceId : allSpaceId) { + const auto& partPrefix = MetaServiceUtils::partPrefix(spaceId); + auto iterPartRet = doPrefix(partPrefix); + if (!nebula::ok(iterPartRet)) { + auto retCode = nebula::error(iterPartRet); + LOG(ERROR) << "Part prefix failed, error: " << static_cast(retCode); + return retCode; + } + iter = nebula::value(iterPartRet).get(); + + while (iter->valid()) { + bool needUpdate = false; + auto partHosts = MetaServiceUtils::parsePartVal(iter->val()); + for (auto& host : partHosts) { + if (host == ipv4From) { + needUpdate = true; + host = ipv4To; + } + } + if (needUpdate) { + data.emplace_back(iter->key(), MetaServiceUtils::partVal(partHosts)); + } + iter->next(); + } + } + + if (direct) { + // ?????? + auto kvRet = kvstore_->multiPutWithoutReplicator(kDefaultSpaceId, std::move(data)); + if (kvRet != kvstore::ResultCode::SUCCEEDED) { + LOG(ERROR) << "multiPutWithoutReplicator failed kvRet=" << kvRet; + return false; + } + return true; + } + + bool updateSucceed{false}; + folly::Baton baton; + // ???? 
+ kvstore_->asyncMultiPut( + kDefaultSpaceId, kDefaultPartId, std::move(data), [&](kvstore::ResultCode code) { + updateSucceed = (code == kvstore::ResultCode::SUCCEEDED); + LOG(ERROR) << "multiPutWithoutReplicator failed kvRet=" << code; + baton.post(); + }); + baton.wait(); + + return updateSucceed; +} + + +template +ErrorOr +BaseProcessor::replaceHostInZone(const HostAddr& ipv4From, + const HostAddr& ipv4To, + bool direct) { + folly::SharedMutex::WriteHolder wHolder(LockUtils::spaceLock()); + const auto& zonePrefix = MetaServiceUtils::zonePrefix(); + auto iterRet = doPrefix(zonePrefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "Zone prefix failed, error: " << static_cast(retCode); + return retCode; + } + auto iter = nebula::value(iterRet).get(); + std::vector data; + + while (iter->valid()) { + bool needUpdate = false; + auto zoneName = MetaServiceUtils::parseZoneName(iter->key()); + auto hosts = MetaServiceUtils::parseZoneHosts(iter->val()); + std::vector DesHosts; + for (auto& host : hosts) { + if (host == ipv4From) { + needUpdate = true; + host = ipv4To; + } + } + if (needUpdate) { + data.emplace_back(iter->key(), MetaServiceUtils::zoneVal(hosts)); + } + iter->next(); + } + + if (direct) { + // ???? + auto kvRet = kvstore_->multiPutWithoutReplicator(kDefaultSpaceId, std::move(data)); + if (kvRet != kvstore::ResultCode::SUCCEEDED) { + LOG(ERROR) << "multiPutWithoutReplicator failed kvRet=" << kvRet; + return false; + } + return true; + } + + bool updateSucceed{false}; + folly::Baton baton; + // ???? 
+ kvstore_->asyncMultiPut( + kDefaultSpaceId, kDefaultPartId, std::move(data), [&](kvstore::ResultCode code) { + updateSucceed = (code == kvstore::ResultCode::SUCCEEDED); + LOG(ERROR) << "multiPutWithoutReplicator failed kvRet=" << code; + baton.post(); + }); + baton.wait(); + + return updateSucceed; +} + } // namespace meta } // namespace nebula diff --git a/src/meta/processors/admin/AdminClient.cpp b/src/meta/processors/admin/AdminClient.cpp index 7ef78bc22..6338b3181 100644 --- a/src/meta/processors/admin/AdminClient.cpp +++ b/src/meta/processors/admin/AdminClient.cpp @@ -6,6 +6,7 @@ #include "meta/processors/admin/AdminClient.h" #include "meta/MetaServiceUtils.h" +#include "meta/common/MetaCommon.h" #include "meta/processors/Common.h" #include "meta/ActiveHostsMan.h" #include "kvstore/Part.h" @@ -24,11 +25,12 @@ folly::Future AdminClient::transLeader(GraphSpaceID spaceId, req.set_space_id(spaceId); req.set_part_id(partId); auto ret = getPeers(spaceId, partId); - if (!ret.ok()) { - LOG(ERROR) << "Get peers failed: " << ret.status(); - return ret.status(); + if (!nebula::ok(ret)) { + LOG(ERROR) << "Get peers failed: " << static_cast(nebula::error(ret)); + return Status::Error("Get peers failed"); } - auto peers = std::move(ret).value(); + + auto peers = std::move(nebula::value(ret)); auto it = std::find(peers.begin(), peers.end(), leader); if (it == peers.end()) { return Status::PartNotFound(); @@ -36,9 +38,12 @@ folly::Future AdminClient::transLeader(GraphSpaceID spaceId, auto target = dst; if (dst == kRandomPeer) { for (auto& p : peers) { - if (p != leader && ActiveHostsMan::isLived(kv_, p)) { - target = p; - break; + if (p != leader) { + auto retCode = ActiveHostsMan::isLived(kv_, p); + if (nebula::ok(retCode) && nebula::value(retCode)) { + target = p; + break; + } } } } @@ -71,11 +76,12 @@ folly::Future AdminClient::addPart(GraphSpaceID spaceId, req.set_part_id(partId); req.set_as_learner(asLearner); auto ret = getPeers(spaceId, partId); - if (!ret.ok()) { - 
LOG(ERROR) << "Get peers failed: " << ret.status(); - return ret.status(); + if (!nebula::ok(ret)) { + LOG(ERROR) << "Get peers failed: " << static_cast(nebula::error(ret)); + return Status::Error("Get peers failed"); } - req.set_peers(std::move(ret).value()); + + req.set_peers(std::move(nebula::value(ret))); return getResponse(Utils::getAdminAddrFromStoreAddr(host), std::move(req), [] (auto client, auto request) { return client->future_addPart(request); @@ -98,11 +104,12 @@ folly::Future AdminClient::addLearner(GraphSpaceID spaceId, req.set_part_id(partId); req.set_learner(learner); auto ret = getPeers(spaceId, partId); - if (!ret.ok()) { - LOG(ERROR) << "Get peers failed: " << ret.status(); - return ret.status(); + if (!nebula::ok(ret)) { + LOG(ERROR) << "Get peers failed: " << static_cast(nebula::error(ret)); + return Status::Error("Get peers failed"); } - auto peers = std::move(ret).value(); + + auto peers = std::move(nebula::value(ret)); folly::Promise pro; auto f = pro.getFuture(); getResponse(getAdminAddrFromPeers(peers), 0, std::move(req), @@ -121,11 +128,12 @@ folly::Future AdminClient::waitingForCatchUpData(GraphSpaceID spaceId, req.set_part_id(partId); req.set_target(target); auto ret = getPeers(spaceId, partId); - if (!ret.ok()) { - LOG(ERROR) << "Get peers failed: " << ret.status(); - return ret.status(); + if (!nebula::ok(ret)) { + LOG(ERROR) << "Get peers failed: " << static_cast(nebula::error(ret)); + return Status::Error("Get peers failed"); } - auto peers = std::move(ret).value(); + + auto peers = std::move(nebula::value(ret)); folly::Promise pro; auto f = pro.getFuture(); getResponse(getAdminAddrFromPeers(peers), 0, std::move(req), @@ -146,11 +154,12 @@ folly::Future AdminClient::memberChange(GraphSpaceID spaceId, req.set_add(added); req.set_peer(peer); auto ret = getPeers(spaceId, partId); - if (!ret.ok()) { - LOG(ERROR) << "Get peers failed: " << ret.status(); - return ret.status(); + if (!nebula::ok(ret)) { + LOG(ERROR) << "Get peers failed: " 
<< static_cast(nebula::error(ret)); + return Status::Error("Get peers failed"); } - auto peers = std::move(ret).value(); + + auto peers = std::move(nebula::value(ret)); folly::Promise pro; auto f = pro.getFuture(); getResponse(getAdminAddrFromPeers(peers), 0, std::move(req), @@ -167,11 +176,12 @@ folly::Future AdminClient::updateMeta(GraphSpaceID spaceId, const HostAddr& dst) { CHECK_NOTNULL(kv_); auto ret = getPeers(spaceId, partId); - if (!ret.ok()) { - LOG(ERROR) << "Get peers failed: " << ret.status(); - return ret.status(); + if (!nebula::ok(ret)) { + LOG(ERROR) << "Get peers failed: " << static_cast(nebula::error(ret)); + return Status::Error("Get peers failed"); } - auto peers = std::move(ret).value(); + + auto peers = std::move(nebula::value(ret)); auto strHosts = [] (const std::vector& hosts) -> std::string { std::stringstream peersStr; for (auto& h : hosts) { @@ -242,20 +252,29 @@ folly::Future AdminClient::checkPeers(GraphSpaceID spaceId, PartitionID storage::cpp2::CheckPeersReq req; req.set_space_id(spaceId); req.set_part_id(partId); - auto ret = getPeers(spaceId, partId); - if (!ret.ok()) { - LOG(ERROR) << "Get peers failed: " << ret.status(); - return ret.status(); + auto peerRet = getPeers(spaceId, partId); + if (!nebula::ok(peerRet)) { + LOG(ERROR) << "Get peers failed: " << static_cast(nebula::error(peerRet)); + return Status::Error("Get peers failed"); } - auto peers = std::move(ret).value(); + + auto peers = std::move(nebula::value(peerRet)); req.set_peers(peers); folly::Promise pro; auto fut = pro.getFuture(); std::vector> futures; for (auto& p : peers) { - if (!ActiveHostsMan::isLived(kv_, p)) { - LOG(INFO) << "[" << spaceId << ":" << partId << "], Skip the dead host " << p; - continue; + auto ret = ActiveHostsMan::isLived(kv_, p); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(INFO) << "Get active host failed, error: " << static_cast(retCode); + return Status::Error("Get peers failed"); + } else { + auto isLive = 
nebula::value(ret); + if (!isLive) { + LOG(INFO) << "[" << spaceId << ":" << partId << "], Skip the dead host " << p; + continue; + } } auto f = getResponse(Utils::getAdminAddrFromStoreAddr(p), req, [] (auto client, auto request) { @@ -473,22 +492,16 @@ void AdminClient::getResponse(std::vector hosts, }); // via } -StatusOr> AdminClient::getPeers(GraphSpaceID spaceId, PartitionID partId) { +ErrorOr> +AdminClient::getPeers(GraphSpaceID spaceId, PartitionID partId) { CHECK_NOTNULL(kv_); auto partKey = MetaServiceUtils::partKey(spaceId, partId); std::string value; auto code = kv_->get(kDefaultSpaceId, kDefaultPartId, partKey, &value); - switch (code) { - case kvstore::ResultCode::SUCCEEDED: { - return MetaServiceUtils::parsePartVal(value); - } - case kvstore::ResultCode::ERR_KEY_NOT_FOUND: - return Status::Error("Key Not Found"); - default: - LOG(WARNING) << "Get peers failed, error " << static_cast(code); - break; + if (code == kvstore::ResultCode::SUCCEEDED) { + return MetaServiceUtils::parsePartVal(value); } - return Status::Error("Get Failed"); + return MetaCommon::to(code); } std::vector AdminClient::getAdminAddrFromPeers(const std::vector &peers) { @@ -535,8 +548,13 @@ void AdminClient::getLeaderDist(const HostAddr& host, folly::Future AdminClient::getLeaderDist(HostLeaderMap* result) { folly::Promise promise; auto future = promise.getFuture(); - auto allHosts = ActiveHostsMan::getActiveHosts(kv_); + auto allHostsRet = ActiveHostsMan::getActiveHosts(kv_); + if (!nebula::ok(allHostsRet)) { + promise.setValue(Status::Error("Get leader failed")); + return future; + } + auto allHosts = nebula::value(allHostsRet); std::vector>> hostFutures; for (const auto& h : allHosts) { folly::Promise> pro; @@ -647,7 +665,21 @@ AdminClient::addTask(cpp2::AdminCmd cmd, std::vector parts, int concurrency, cpp2::StatisItem* statisResult) { - auto hosts = targetHost.empty() ? 
ActiveHostsMan::getActiveAdminHosts(kv_) : targetHost; + folly::Promise pro; + auto f = pro.getFuture(); + std::vector hosts; + if (targetHost.empty()) { + auto activeHostsRet = ActiveHostsMan::getActiveAdminHosts(kv_); + if (!nebula::ok(activeHostsRet)) { + pro.setValue(Status::Error("Get actice hosts failed")); + return f; + } else { + hosts = nebula::value(activeHostsRet); + } + } else { + hosts = targetHost; + } + storage::cpp2::AddAdminTaskRequest req; req.set_cmd(cmd); req.set_job_id(jobId); @@ -660,7 +692,6 @@ AdminClient::addTask(cpp2::AdminCmd cmd, para.set_task_specfic_paras(taskSpecficParas); req.set_para(std::move(para)); - folly::Promise pro; std::function respGen = [statisResult] (storage::cpp2::AdminExecResp&& resp) -> void { if (statisResult && resp.statis_ref().has_value()) { @@ -668,7 +699,6 @@ AdminClient::addTask(cpp2::AdminCmd cmd, } }; - auto f = pro.getFuture(); getResponse(hosts, 0, std::move(req), [] (auto client, auto request) { return client->future_addAdminTask(request); @@ -680,19 +710,32 @@ folly::Future AdminClient::stopTask(const std::vector& target, int32_t jobId, int32_t taskId) { - auto hosts = target.empty() ? 
ActiveHostsMan::getActiveAdminHosts(kv_) : target; + folly::Promise pro; + auto f = pro.getFuture(); + std::vector hosts; + if (target.empty()) { + auto activeHostsRet = ActiveHostsMan::getActiveAdminHosts(kv_); + if (!nebula::ok(activeHostsRet)) { + pro.setValue(Status::Error("Get actice hosts failed")); + return f; + } else { + hosts = nebula::value(activeHostsRet); + } + } else { + hosts = target; + } + storage::cpp2::StopAdminTaskRequest req; req.set_job_id(jobId); req.set_task_id(taskId); - folly::Promise pro; - auto f = pro.getFuture(); getResponse(hosts, 0, std::move(req), [] (auto client, auto request) { return client->future_stopAdminTask(request); }, 0, std::move(pro), 1); return f; } + } // namespace meta } // namespace nebula diff --git a/src/meta/processors/admin/AdminClient.h b/src/meta/processors/admin/AdminClient.h index 43094036b..2b958113b 100644 --- a/src/meta/processors/admin/AdminClient.h +++ b/src/meta/processors/admin/AdminClient.h @@ -136,7 +136,8 @@ class AdminClient { Status handleResponse(const storage::cpp2::AdminExecResp& resp); - StatusOr> getPeers(GraphSpaceID spaceId, PartitionID partId); + ErrorOr> + getPeers(GraphSpaceID spaceId, PartitionID partId); std::vector getAdminAddrFromPeers(const std::vector &peers); diff --git a/src/meta/processors/admin/BalancePlan.cpp b/src/meta/processors/admin/BalancePlan.cpp index c77cd2e76..e9e3d5e31 100644 --- a/src/meta/processors/admin/BalancePlan.cpp +++ b/src/meta/processors/admin/BalancePlan.cpp @@ -190,9 +190,18 @@ cpp2::ErrorCode BalancePlan::recovery(bool resume) { task.ret_ = BalanceTaskResult::IN_PROGRESS; } task.status_ = BalanceTaskStatus::START; - if (!ActiveHostsMan::isLived(kv_, task.dst_)) { - LOG(ERROR) << "The destination is not lived"; - task.ret_ = BalanceTaskResult::INVALID; + auto activeHostRet = ActiveHostsMan::isLived(kv_, task.dst_); + if (!nebula::ok(activeHostRet)) { + auto retCode = nebula::error(activeHostRet); + LOG(ERROR) << "Get active hosts failed, error: " + << 
static_cast(retCode); + return retCode; + } else { + auto isLive = nebula::value(activeHostRet); + if (!isLive) { + LOG(ERROR) << "The destination is not lived"; + task.ret_ = BalanceTaskResult::INVALID; + } } } } diff --git a/src/meta/processors/admin/BalanceProcessor.cpp b/src/meta/processors/admin/BalanceProcessor.cpp index 4b873774c..57856ed93 100644 --- a/src/meta/processors/admin/BalanceProcessor.cpp +++ b/src/meta/processors/admin/BalanceProcessor.cpp @@ -26,13 +26,13 @@ void BalanceProcessor::process(const cpp2::BalanceReq& req) { return; } auto ret = Balancer::instance(kvstore_)->stop(); - if (!ret.ok()) { - handleErrorCode(cpp2::ErrorCode::E_NO_RUNNING_BALANCE_PLAN); + if (!nebula::ok(ret)) { + handleErrorCode(nebula::error(ret)); onFinished(); return; } handleErrorCode(cpp2::ErrorCode::SUCCEEDED); - resp_.set_id(ret.value()); + resp_.set_id(nebula::value(ret)); onFinished(); return; } @@ -57,13 +57,14 @@ void BalanceProcessor::process(const cpp2::BalanceReq& req) { if (req.get_id() != nullptr) { auto ret = Balancer::instance(kvstore_)->show(*req.get_id()); - if (!ret.ok()) { - LOG(ERROR) << "Balance ID not found: " << *req.get_id(); - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "Show balance ID failed, error " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - const auto& plan = ret.value(); + const auto& plan = nebula::value(ret); std::vector thriftTasks; for (auto& task : plan.tasks()) { cpp2::BalanceTask t; @@ -89,7 +90,16 @@ void BalanceProcessor::process(const cpp2::BalanceReq& req) { return; } - auto hosts = ActiveHostsMan::getActiveHosts(kvstore_); + auto activeHostsRet = ActiveHostsMan::getActiveHosts(kvstore_); + if (!nebula::ok(activeHostsRet)) { + auto retCode = nebula::error(activeHostsRet); + LOG(ERROR) << "Get active hosts failed, error: " << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } + auto 
hosts = std::move(nebula::value(activeHostsRet)); + if (hosts.empty()) { LOG(ERROR) << "There is no active hosts"; handleErrorCode(cpp2::ErrorCode::E_NO_HOSTS); @@ -104,8 +114,9 @@ void BalanceProcessor::process(const cpp2::BalanceReq& req) { auto ret = Balancer::instance(kvstore_)->balance(std::move(lostHosts)); if (!ok(ret)) { - LOG(ERROR) << "Balance Failed: " << static_cast(ret.left()); - handleErrorCode(error(ret)); + auto retCode = error(ret); + LOG(ERROR) << "Balance Failed: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } diff --git a/src/meta/processors/admin/BalanceTask.cpp b/src/meta/processors/admin/BalanceTask.cpp index e3bd138e2..2acf9e99e 100644 --- a/src/meta/processors/admin/BalanceTask.cpp +++ b/src/meta/processors/admin/BalanceTask.cpp @@ -60,8 +60,8 @@ void BalanceTask::invoke() { case BalanceTaskStatus::CHANGE_LEADER: { LOG(INFO) << taskIdStr_ << " Ask the src to give up the leadership."; SAVE_STATE(); - bool srcLived = ActiveHostsMan::isLived(kv_, src_); - if (srcLived) { + auto srcLivedRet = ActiveHostsMan::isLived(kv_, src_); + if (nebula::ok(srcLivedRet) && nebula::value(srcLivedRet)) { client_->transLeader(spaceId_, partId_, src_).thenValue([this](auto&& resp) { if (!resp.ok()) { if (resp == nebula::Status::PartNotFound()) { @@ -176,10 +176,10 @@ void BalanceTask::invoke() { break; } case BalanceTaskStatus::REMOVE_PART_ON_SRC: { - bool srcLived = ActiveHostsMan::isLived(kv_, src_); - LOG(INFO) << taskIdStr_ << " Close part on src host, srcLived " << srcLived; + auto srcLivedRet = ActiveHostsMan::isLived(kv_, src_); + LOG(INFO) << taskIdStr_ << " Close part on src host, srcLived."; SAVE_STATE(); - if (srcLived) { + if (nebula::ok(srcLivedRet) && nebula::value(srcLivedRet)) { client_->removePart(spaceId_, partId_, src_).thenValue([this](auto&& resp) { if (!resp.ok()) { LOG(ERROR) << taskIdStr_ << " Remove part failed, status " << resp; diff --git a/src/meta/processors/admin/Balancer.cpp 
b/src/meta/processors/admin/Balancer.cpp index 8d14d3bfe..d7ddf6285 100644 --- a/src/meta/processors/admin/Balancer.cpp +++ b/src/meta/processors/admin/Balancer.cpp @@ -48,7 +48,7 @@ ErrorOr Balancer::balance(std::vector&& lo return plan_->id(); } -StatusOr Balancer::show(BalanceID id) const { +ErrorOr Balancer::show(BalanceID id) const { std::lock_guard lg(lock_); if (plan_ != nullptr && plan_->id() == id) { return *plan_; @@ -58,17 +58,18 @@ StatusOr Balancer::show(BalanceID id) const { BalancePlan plan(id, kv_, client_); auto retCode = plan.recovery(false); if (retCode != cpp2::ErrorCode::SUCCEEDED) { - return Status::Error("Get balance plan failed, id %ld", id); + LOG(ERROR) << "Get balance plan failed, id " << id; + return retCode; } return plan; } - return Status::Error("KV is nullptr"); + return cpp2::ErrorCode::E_NOT_FOUND; } -StatusOr Balancer::stop() { +ErrorOr Balancer::stop() { std::lock_guard lg(lock_); if (!running_) { - return Status::Error("No running balance plan"); + return cpp2::ErrorCode::E_NOT_FOUND; } CHECK(!!plan_); plan_->stop(); @@ -89,8 +90,9 @@ ErrorOr Balancer::cleanLastInValidPlan() { std::unique_ptr iter; auto ret = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Can't access kvstore, ret = " << static_cast(ret); - return MetaCommon::to(ret); + auto retCode = MetaCommon::to(ret); + LOG(ERROR) << "Can't access kvstore, ret = " << static_cast(retCode); + return retCode; } // There should be at most one invalid plan, and it must be the latest one if (iter->valid()) { @@ -130,8 +132,9 @@ cpp2::ErrorCode Balancer::recovery() { std::unique_ptr iter; auto ret = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Can't access kvstore, ret = " << static_cast(ret); - return MetaCommon::to(ret); + auto retCode = MetaCommon::to(ret); + LOG(ERROR) << "Can't access kvstore, ret = " << static_cast(retCode); 
+ return retCode; } std::vector corruptedPlans; // The balance plan is stored with balance id desc order, there should be at most one @@ -156,7 +159,7 @@ cpp2::ErrorCode Balancer::recovery() { { std::lock_guard lg(lock_); if (LastUpdateTimeMan::update(kv_, time::WallClock::fastNowInMilliSec()) != - kvstore::ResultCode::SUCCEEDED) { + cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Balance plan " << plan_->id() << " update meta failed"; } finish(); @@ -171,15 +174,17 @@ cpp2::ErrorCode Balancer::recovery() { return cpp2::ErrorCode::SUCCEEDED; } -bool Balancer::getAllSpaces(std::vector>& spaces) { +cpp2::ErrorCode +Balancer::getAllSpaces(std::vector>& spaces) { // Get all spaces folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); auto prefix = MetaServiceUtils::spacePrefix(); std::unique_ptr iter; auto ret = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get all spaces failed"; - return false; + auto retCode = MetaCommon::to(ret); + LOG(ERROR) << "Get all spaces failed, error: " << static_cast(retCode); + return retCode; } while (iter->valid()) { @@ -189,7 +194,7 @@ bool Balancer::getAllSpaces(std::vector> spaces.emplace_back(spaceId, *properties.replica_factor_ref(), zoned); iter->next(); } - return true; + return cpp2::ErrorCode::SUCCEEDED; } cpp2::ErrorCode Balancer::buildBalancePlan(std::vector&& lostHosts) { @@ -199,9 +204,10 @@ cpp2::ErrorCode Balancer::buildBalancePlan(std::vector&& lostHosts) { } std::vector> spaces; - if (!getAllSpaces(spaces)) { + auto spacesRet = getAllSpaces(spaces); + if (spacesRet != cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Can't get all spaces"; - return cpp2::ErrorCode::E_STORE_FAILURE; + return spacesRet; } plan_ = std::make_unique(time::WallClock::fastNowInSec(), kv_, client_); @@ -227,7 +233,7 @@ cpp2::ErrorCode Balancer::buildBalancePlan(std::vector&& lostHosts) { { std::lock_guard lg(lock_); if (LastUpdateTimeMan::update(kv_, 
time::WallClock::fastNowInMilliSec()) != - kvstore::ResultCode::SUCCEEDED) { + cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Balance plan " << plan_->id() << " update meta failed"; } finish(); @@ -248,12 +254,21 @@ Balancer::genTasks(GraphSpaceID spaceId, int32_t totalParts = 0; // hostParts is current part allocation map auto result = getHostParts(spaceId, dependentOnGroup, hostParts, totalParts); - if (!result || totalParts == 0 || hostParts.empty()) { + if (!nebula::ok(result)) { + return nebula::error(result); + } + auto retVal = nebula::value(result); + if (!retVal || totalParts == 0 || hostParts.empty()) { LOG(ERROR) << "Invalid space " << spaceId; return cpp2::ErrorCode::E_NOT_FOUND; } - auto hostPartsRet = fetchHostParts(spaceId, dependentOnGroup, hostParts, lostHosts); + auto fetchHostPartsRet = fetchHostParts(spaceId, dependentOnGroup, hostParts, lostHosts); + if (!nebula::ok(fetchHostPartsRet)) { + return nebula::error(fetchHostPartsRet); + } + + auto hostPartsRet = nebula::value(fetchHostPartsRet); auto confirmedHostParts = hostPartsRet.first; auto activeHosts = hostPartsRet.second; LOG(INFO) << "Now, try to balance the confirmedHostParts"; @@ -275,10 +290,11 @@ Balancer::genTasks(GraphSpaceID spaceId, return cpp2::ErrorCode::E_NO_VALID_HOST; } - if (!transferLostHost(tasks, confirmedHostParts, lostHost, - spaceId, partId, dependentOnGroup)) { + auto retCode = transferLostHost(tasks, confirmedHostParts, lostHost, + spaceId, partId, dependentOnGroup); + if (retCode != cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Transfer lost host " << lostHost << " failed"; - return cpp2::ErrorCode::E_NO_VALID_HOST; + return retCode; } } } @@ -295,25 +311,26 @@ Balancer::genTasks(GraphSpaceID spaceId, } } -bool Balancer::transferLostHost(std::vector& tasks, - HostParts& confirmedHostParts, - const HostAddr& source, - GraphSpaceID spaceId, - PartitionID partId, - bool dependentOnGroup) { +cpp2::ErrorCode +Balancer::transferLostHost(std::vector& tasks, + HostParts& 
confirmedHostParts, + const HostAddr& source, + GraphSpaceID spaceId, + PartitionID partId, + bool dependentOnGroup) { // find a host with minimum parts which doesn't have this part - StatusOr result; + ErrorOr result; if (dependentOnGroup) { result = hostWithMinimalPartsForZone(source, confirmedHostParts, partId); } else { result = hostWithMinimalParts(confirmedHostParts, partId); } - if (!result.ok()) { + if (!nebula::ok(result)) { LOG(ERROR) << "Can't find a host which doesn't have part: " << partId; - return false; + return nebula::error(result); } - const auto& targetHost = result.value(); + const auto& targetHost = nebula::value(result); confirmedHostParts[targetHost].emplace_back(partId); tasks.emplace_back(plan_->id_, spaceId, @@ -322,20 +339,25 @@ bool Balancer::transferLostHost(std::vector& tasks, targetHost, kv_, client_); - return true; + return cpp2::ErrorCode::SUCCEEDED; } -std::pair> +ErrorOr>> Balancer::fetchHostParts(GraphSpaceID spaceId, bool dependentOnGroup, const HostParts& hostParts, std::vector& lostHosts) { - std::vector activeHosts; + ErrorOr> activeHostsRet; if (dependentOnGroup) { - activeHosts = ActiveHostsMan::getActiveHostsWithGroup(kv_, spaceId); + activeHostsRet = ActiveHostsMan::getActiveHostsWithGroup(kv_, spaceId); } else { - activeHosts = ActiveHostsMan::getActiveHosts(kv_); + activeHostsRet = ActiveHostsMan::getActiveHosts(kv_); + } + + if (!nebula::ok(activeHostsRet)) { + return nebula::error(activeHostsRet); } + auto activeHosts = nebula::value(activeHostsRet); std::vector expand; calDiff(hostParts, activeHosts, expand, lostHosts); @@ -437,17 +459,20 @@ bool Balancer::balanceParts(BalanceID balanceId, return true; } -bool Balancer::getHostParts(GraphSpaceID spaceId, - bool dependentOnGroup, - HostParts& hostParts, - int32_t& totalParts) { +ErrorOr +Balancer::getHostParts(GraphSpaceID spaceId, + bool dependentOnGroup, + HostParts& hostParts, + int32_t& totalParts) { folly::SharedMutex::ReadHolder 
rHolder(LockUtils::spaceLock()); auto prefix = MetaServiceUtils::partPrefix(spaceId); std::unique_ptr iter; auto code = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); if (code != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId; - return false; + auto retCode = MetaCommon::to(code); + LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId + << static_cast(retCode); + return retCode; } while (iter->valid()) { @@ -467,8 +492,10 @@ bool Balancer::getHostParts(GraphSpaceID spaceId, std::string value; code = kv_->get(kDefaultSpaceId, kDefaultPartId, key, &value); if (code != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId; - return false; + auto retCode = MetaCommon::to(code); + LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId + << static_cast(retCode); + return retCode; } auto properties = MetaServiceUtils::parseSpace(value); @@ -479,9 +506,12 @@ bool Balancer::getHostParts(GraphSpaceID spaceId, if (properties.group_name_ref().has_value()) { auto groupName = *properties.group_name_ref(); - if (dependentOnGroup && !assembleZoneParts(groupName, hostParts)) { - LOG(ERROR) << "Assemble Zone Parts failed group: " << groupName; - return false; + if (dependentOnGroup) { + auto zonePartsRet = assembleZoneParts(groupName, hostParts); + if (zonePartsRet != cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Assemble Zone Parts failed group: " << groupName; + return zonePartsRet; + } } } @@ -489,13 +519,15 @@ bool Balancer::getHostParts(GraphSpaceID spaceId, return true; } -bool Balancer::assembleZoneParts(const std::string& groupName, HostParts& hostParts) { +cpp2::ErrorCode Balancer::assembleZoneParts(const std::string& groupName, HostParts& hostParts) { auto groupKey = MetaServiceUtils::groupKey(groupName); std::string groupValue; auto code = kv_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); if (code != kvstore::ResultCode::SUCCEEDED) { - 
LOG(ERROR) << "Get group " << groupName << " failed"; - return false; + auto retCode = MetaCommon::to(code); + LOG(ERROR) << "Get group " << groupName << " failed" + << static_cast(retCode); + return retCode; } // zoneHosts use to record this host belong to zone's hosts @@ -506,8 +538,10 @@ bool Balancer::assembleZoneParts(const std::string& groupName, HostParts& hostPa std::string zoneValue; code = kv_->get(kDefaultSpaceId, kDefaultPartId, zoneKey, &zoneValue); if (code != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get zone " << zoneName << " failed"; - return false; + auto retCode = MetaCommon::to(code); + LOG(ERROR) << "Get zone " << zoneName << " failed" + << static_cast(retCode); + return retCode; } auto hosts = MetaServiceUtils::parseZoneHosts(std::move(zoneValue)); @@ -542,7 +576,7 @@ bool Balancer::assembleZoneParts(const std::string& groupName, HostParts& hostPa } } } - return true; + return cpp2::ErrorCode::SUCCEEDED; } void Balancer::calDiff(const HostParts& hostParts, @@ -594,32 +628,34 @@ Status Balancer::checkReplica(const HostParts& hostParts, return Status::Error("Not enough alive host hold the part %d", partId); } -StatusOr Balancer::hostWithMinimalParts(const HostParts& hostParts, - PartitionID partId) { +ErrorOr +Balancer::hostWithMinimalParts(const HostParts& hostParts, + PartitionID partId) { auto hosts = sortedHostsByParts(hostParts); for (auto& h : hosts) { auto it = hostParts.find(h.first); if (it == hostParts.end()) { LOG(ERROR) << "Host " << h.first << " not found"; - return Status::Error("Host not found"); + return cpp2::ErrorCode::E_NO_HOSTS; } if (std::find(it->second.begin(), it->second.end(), partId) == it->second.end()) { return h.first; } } - return Status::Error("No host is suitable for %d", partId); + return cpp2::ErrorCode::E_NO_HOSTS; } -StatusOr Balancer::hostWithMinimalPartsForZone(const HostAddr& source, - const HostParts& hostParts, - PartitionID partId) { +ErrorOr +Balancer::hostWithMinimalPartsForZone(const 
HostAddr& source, + const HostParts& hostParts, + PartitionID partId) { auto hosts = sortedHostsByParts(hostParts); for (auto& h : hosts) { auto it = hostParts.find(h.first); if (it == hostParts.end()) { LOG(ERROR) << "Host " << h.first << " not found"; - return Status::Error("Host not found"); + return cpp2::ErrorCode::E_NO_HOSTS; } if (std::find(it->second.begin(), it->second.end(), partId) == it->second.end() && @@ -627,7 +663,7 @@ StatusOr Balancer::hostWithMinimalPartsForZone(const HostAddr& source, return h.first; } } - return Status::Error("No host is suitable for %d", partId); + return cpp2::ErrorCode::E_NO_HOSTS; } cpp2::ErrorCode Balancer::leaderBalance() { @@ -640,9 +676,10 @@ cpp2::ErrorCode Balancer::leaderBalance() { auto future = promise.getFuture(); // Space ID, Replica Factor and Dependent On Group std::vector> spaces; - if (!getAllSpaces(spaces)) { + auto ret = getAllSpaces(spaces); + if (ret != cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Can't get spaces"; - return cpp2::ErrorCode::E_STORE_FAILURE; + return ret; } bool expected = false; @@ -666,7 +703,7 @@ cpp2::ErrorCode Balancer::leaderBalance() { replicaFactor, dependentOnGroup, plan); - if (!balanceResult) { + if (!nebula::ok(balanceResult) || !nebula::value(balanceResult)) { LOG(ERROR) << "Building leader balance plan failed " << "Space: " << spaceId;; continue; @@ -698,12 +735,13 @@ cpp2::ErrorCode Balancer::leaderBalance() { return cpp2::ErrorCode::E_BALANCER_RUNNING; } -bool Balancer::buildLeaderBalancePlan(HostLeaderMap* hostLeaderMap, - GraphSpaceID spaceId, - int32_t replicaFactor, - bool dependentOnGroup, - LeaderBalancePlan& plan, - bool useDeviation) { +ErrorOr +Balancer::buildLeaderBalancePlan(HostLeaderMap* hostLeaderMap, + GraphSpaceID spaceId, + int32_t replicaFactor, + bool dependentOnGroup, + LeaderBalancePlan& plan, + bool useDeviation) { PartAllocation peersMap; HostParts leaderHostParts; size_t leaderParts = 0; @@ -713,8 +751,10 @@ bool 
Balancer::buildLeaderBalancePlan(HostLeaderMap* hostLeaderMap, std::unique_ptr iter; auto ret = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId; - return false; + auto retCode = MetaCommon::to(ret); + LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId + << static_cast(retCode); + return retCode; } while (iter->valid()) { @@ -730,9 +770,14 @@ bool Balancer::buildLeaderBalancePlan(HostLeaderMap* hostLeaderMap, int32_t totalParts = 0; HostParts allHostParts; auto result = getHostParts(spaceId, dependentOnGroup, allHostParts, totalParts); - if (!result || totalParts == 0 || allHostParts.empty()) { - LOG(ERROR) << "Invalid space " << spaceId; - return false; + if (!nebula::ok(result)) { + return nebula::error(result); + } else { + auto retVal = nebula::value(result); + if (!retVal || totalParts == 0 || allHostParts.empty()) { + LOG(ERROR) << "Invalid space " << spaceId; + return false; + } } std::unordered_set activeHosts; @@ -942,14 +987,17 @@ void Balancer::simplifyLeaderBalnacePlan(GraphSpaceID spaceId, LeaderBalancePlan } } -bool Balancer::collectZoneParts(const std::string& groupName, - HostParts& hostParts) { +cpp2::ErrorCode +Balancer::collectZoneParts(const std::string& groupName, + HostParts& hostParts) { auto groupKey = MetaServiceUtils::groupKey(groupName); std::string groupValue; auto code = kv_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); if (code != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get group " << groupName << " failed"; - return false; + auto retCode = MetaCommon::to(code); + LOG(ERROR) << "Get group " << groupName << " failed, error: " + << static_cast(retCode); + return retCode; } // zoneHosts use to record this host belong to zone's hosts @@ -960,8 +1008,10 @@ bool Balancer::collectZoneParts(const std::string& groupName, std::string zoneValue; code = kv_->get(kDefaultSpaceId, kDefaultPartId, 
zoneKey, &zoneValue); if (code != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get zone " << zoneName << " failed"; - return false; + auto retCode = MetaCommon::to(code); + LOG(ERROR) << "Get zone " << zoneName << " failed, error: " + << static_cast(retCode); + return retCode; } auto hosts = MetaServiceUtils::parseZoneHosts(std::move(zoneValue)); @@ -996,7 +1046,7 @@ bool Balancer::collectZoneParts(const std::string& groupName, } } } - return true; + return cpp2::ErrorCode::SUCCEEDED; } bool Balancer::checkZoneLegal(const HostAddr& source, diff --git a/src/meta/processors/admin/Balancer.h b/src/meta/processors/admin/Balancer.h index 2c083d0a5..d4d1d3e04 100644 --- a/src/meta/processors/admin/Balancer.h +++ b/src/meta/processors/admin/Balancer.h @@ -88,12 +88,12 @@ class Balancer { /** * Show balance plan id status. * */ - StatusOr show(BalanceID id) const; + ErrorOr show(BalanceID id) const; /** * Stop balance plan by canceling all waiting balance task. * */ - StatusOr stop(); + ErrorOr stop(); /** * Clean invalid plan, return the invalid plan key if any @@ -158,18 +158,18 @@ class Balancer { bool dependentOnGroup, std::vector&& lostHosts); - std::pair> + ErrorOr>> fetchHostParts(GraphSpaceID spaceId, bool dependentOnGroup, const HostParts& hostParts, std::vector& lostHosts); - bool getHostParts(GraphSpaceID spaceId, - bool dependentOnGroup, - HostParts& hostParts, - int32_t& totalParts); + ErrorOr getHostParts(GraphSpaceID spaceId, + bool dependentOnGroup, + HostParts& hostParts, + int32_t& totalParts); - bool assembleZoneParts(const std::string& groupName, HostParts& hostParts); + cpp2::ErrorCode assembleZoneParts(const std::string& groupName, HostParts& hostParts); void calDiff(const HostParts& hostParts, const std::vector& activeHosts, @@ -181,36 +181,38 @@ class Balancer { int32_t replica, PartitionID partId); - StatusOr hostWithMinimalParts(const HostParts& hostParts, - PartitionID partId); + ErrorOr + hostWithMinimalParts(const HostParts& hostParts, 
PartitionID partId); - StatusOr hostWithMinimalPartsForZone(const HostAddr& source, - const HostParts& hostParts, - PartitionID partId); + ErrorOr + hostWithMinimalPartsForZone(const HostAddr& source, + const HostParts& hostParts, + PartitionID partId); bool balanceParts(BalanceID balanceId, GraphSpaceID spaceId, HostParts& newHostParts, int32_t totalParts, std::vector& tasks); - bool transferLostHost(std::vector& tasks, - HostParts& newHostParts, - const HostAddr& source, - GraphSpaceID spaceId, - PartitionID partId, - bool dependentOnGroup); + cpp2::ErrorCode transferLostHost(std::vector& tasks, + HostParts& newHostParts, + const HostAddr& source, + GraphSpaceID spaceId, + PartitionID partId, + bool dependentOnGroup); std::vector> sortedHostsByParts(const HostParts& hostParts); - bool getAllSpaces(std::vector>& spaces); + cpp2::ErrorCode getAllSpaces(std::vector>& spaces); - bool buildLeaderBalancePlan(HostLeaderMap* hostLeaderMap, - GraphSpaceID spaceId, - int32_t replicaFactor, - bool dependentOnGroup, - LeaderBalancePlan& plan, - bool useDeviation = true); + ErrorOr + buildLeaderBalancePlan(HostLeaderMap* hostLeaderMap, + GraphSpaceID spaceId, + int32_t replicaFactor, + bool dependentOnGroup, + LeaderBalancePlan& plan, + bool useDeviation = true); void simplifyLeaderBalnacePlan(GraphSpaceID spaceId, LeaderBalancePlan& plan); @@ -229,7 +231,7 @@ class Balancer { LeaderBalancePlan& plan, GraphSpaceID spaceId); - bool collectZoneParts(const std::string& groupName, HostParts& hostParts); + cpp2::ErrorCode collectZoneParts(const std::string& groupName, HostParts& hostParts); bool checkZoneLegal(const HostAddr& source, const HostAddr& target, PartitionID part); diff --git a/src/meta/processors/admin/CreateBackupProcessor.cpp b/src/meta/processors/admin/CreateBackupProcessor.cpp index bd336a7c4..5388d1339 100644 --- a/src/meta/processors/admin/CreateBackupProcessor.cpp +++ b/src/meta/processors/admin/CreateBackupProcessor.cpp @@ -11,17 +11,15 @@ namespace nebula { 
namespace meta { -folly::Optional> CreateBackupProcessor::spaceNameToId( +ErrorOr> +CreateBackupProcessor::spaceNameToId( const std::vector* backupSpaces) { folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); std::unordered_set spaces; if (backupSpaces != nullptr) { DCHECK(!backupSpaces->empty()); - std::vector values; std::vector keys; - - values.reserve(backupSpaces->size()); keys.reserve(backupSpaces->size()); std::transform( @@ -29,17 +27,14 @@ folly::Optional> CreateBackupProcessor::spaceNa return MetaServiceUtils::indexSpaceKey(name); }); - auto ret = kvstore_->multiGet(kDefaultSpaceId, kDefaultPartId, std::move(keys), &values); - if (ret.first != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Failed to get space id, error: " << ret.first; - if (ret.first == kvstore::ResultCode::ERR_KEY_NOT_FOUND) { - handleErrorCode(cpp2::ErrorCode::E_BACKUP_SPACE_NOT_FOUND); - } else { - handleErrorCode(MetaCommon::to(ret.first)); - } - return folly::none; + auto result = doMultiGet(std::move(keys)); + if (!nebula::ok(result)) { + auto retCode = nebula::error(result); + LOG(ERROR) << "MultiGet space failed, error: " << static_cast(retCode);; + return retCode; } + auto values = std::move(nebula::value(result)); std::transform(std::make_move_iterator(values.begin()), std::make_move_iterator(values.end()), std::inserter(spaces, spaces.end()), @@ -48,14 +43,16 @@ folly::Optional> CreateBackupProcessor::spaceNa }); } else { - auto prefix = MetaServiceUtils::spacePrefix(); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - handleErrorCode(MetaCommon::to(ret)); - return folly::none; + const auto& prefix = MetaServiceUtils::spacePrefix(); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "Space prefix failed, error: " + << static_cast(retCode); + return retCode; } + auto iter = 
nebula::value(iterRet).get(); while (iter->valid()) { auto spaceId = MetaServiceUtils::spaceId(iter->key()); auto spaceName = MetaServiceUtils::spaceName(iter->val()); @@ -67,8 +64,7 @@ folly::Optional> CreateBackupProcessor::spaceNa if (spaces.empty()) { LOG(ERROR) << "Failed to create a full backup because there is currently no space."; - handleErrorCode(cpp2::ErrorCode::E_BACKUP_SPACE_NOT_FOUND); - return folly::none; + return cpp2::ErrorCode::E_BACKUP_SPACE_NOT_FOUND; } return spaces; @@ -83,15 +79,15 @@ void CreateBackupProcessor::process(const cpp2::CreateBackupReq& req) { return; } - auto result = MetaServiceUtils::isIndexRebuilding(kvstore_); - if (result == folly::none) { - LOG(ERROR) << "Index is rebuilding, not allowed to create backup."; - handleErrorCode(cpp2::ErrorCode::E_BACKUP_FAILURE); + auto result = isIndexRebuilding(); + if (!nebula::ok(result)) { + handleErrorCode(nebula::error(result)); onFinished(); return; } - if (result.value()) { + if (nebula::value(result)) { + LOG(ERROR) << "Index is rebuilding, not allowed to create backup."; handleErrorCode(cpp2::ErrorCode::E_BACKUP_BUILDING_INDEX); onFinished(); return; @@ -99,7 +95,14 @@ void CreateBackupProcessor::process(const cpp2::CreateBackupReq& req) { folly::SharedMutex::WriteHolder wHolder(LockUtils::snapshotLock()); - auto hosts = ActiveHostsMan::getActiveHosts(kvstore_); + auto activeHostsRet = ActiveHostsMan::getActiveHosts(kvstore_); + if (!nebula::ok(activeHostsRet)) { + handleErrorCode(nebula::error(activeHostsRet)); + onFinished(); + return; + } + auto hosts = std::move(nebula::value(activeHostsRet)); + if (hosts.empty()) { LOG(ERROR) << "There has some offline hosts"; handleErrorCode(cpp2::ErrorCode::E_NO_HOSTS); @@ -108,12 +111,13 @@ void CreateBackupProcessor::process(const cpp2::CreateBackupReq& req) { } auto spaceIdRet = spaceNameToId(backupSpaces); - if (spaceIdRet == folly::none) { + if (!nebula::ok(spaceIdRet)) { + handleErrorCode(nebula::error(spaceIdRet)); onFinished(); 
return; } - auto spaces = spaceIdRet.value(); + auto spaces = nebula::value(spaceIdRet); // The entire process follows mostly snapshot logic. std::vector data; @@ -139,9 +143,9 @@ void CreateBackupProcessor::process(const cpp2::CreateBackupReq& req) { // step 3 : Create checkpoint for all storage engines. auto sret = Snapshot::instance(kvstore_, client_)->createSnapshot(backupName); - if (sret.isLeftType()) { + if (!nebula::ok(sret)) { LOG(ERROR) << "Checkpoint create error on storage engine"; - handleErrorCode(sret.left()); + handleErrorCode(nebula::error(sret)); ret = Snapshot::instance(kvstore_, client_)->blockingWrites(SignType::BLOCK_OFF); if (ret != cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Cancel write blocking error"; @@ -174,12 +178,12 @@ void CreateBackupProcessor::process(const cpp2::CreateBackupReq& req) { NetworkUtils::toHostsStr(hosts))); auto putRet = doSyncPut(std::move(data)); - if (putRet != kvstore::ResultCode::SUCCEEDED) { + if (putRet != cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "All checkpoint creations are done, " "but update checkpoint status error. " "backup : " << backupName; - handleErrorCode(MetaCommon::to(putRet)); + handleErrorCode(putRet); onFinished(); return; } @@ -187,18 +191,18 @@ void CreateBackupProcessor::process(const cpp2::CreateBackupReq& req) { std::unordered_map backupInfo; // set backup info - auto snapshotInfo = std::move(sret.right()); + auto snapshotInfo = std::move(nebula::value(sret)); for (auto id : spaces) { LOG(INFO) << "backup space " << id; cpp2::SpaceBackupInfo spaceInfo; auto spaceKey = MetaServiceUtils::spaceKey(id); auto spaceRet = doGet(spaceKey); - if (!spaceRet.ok()) { - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(spaceRet)) { + handleErrorCode(nebula::error(spaceRet)); onFinished(); return; } - auto properties = MetaServiceUtils::parseSpace(spaceRet.value()); + auto properties = MetaServiceUtils::parseSpace(nebula::value(spaceRet)); // todo we should save partition info. 
auto it = snapshotInfo.find(id); DCHECK(it != snapshotInfo.end()); @@ -212,11 +216,12 @@ void CreateBackupProcessor::process(const cpp2::CreateBackupReq& req) { backup.set_backup_info(std::move(backupInfo)); backup.set_backup_name(std::move(backupName)); - resp_.set_code(cpp2::ErrorCode::SUCCEEDED); + handleErrorCode(cpp2::ErrorCode::SUCCEEDED); resp_.set_meta(std::move(backup)); LOG(INFO) << "backup done"; onFinished(); } + } // namespace meta } // namespace nebula diff --git a/src/meta/processors/admin/CreateBackupProcessor.h b/src/meta/processors/admin/CreateBackupProcessor.h index e387df5ad..d66f9e472 100644 --- a/src/meta/processors/admin/CreateBackupProcessor.h +++ b/src/meta/processors/admin/CreateBackupProcessor.h @@ -19,18 +19,22 @@ class CreateBackupProcessor : public BaseProcessor { static CreateBackupProcessor* instance(kvstore::KVStore* kvstore, AdminClient* client) { return new CreateBackupProcessor(kvstore, client); } + void process(const cpp2::CreateBackupReq& req); private: explicit CreateBackupProcessor(kvstore::KVStore* kvstore, AdminClient* client) : BaseProcessor(kvstore), client_(client) {} + cpp2::ErrorCode cancelWriteBlocking(); - folly::Optional> spaceNameToId( - const std::vector* backupSpaces); + + ErrorOr> + spaceNameToId(const std::vector* backupSpaces); private: AdminClient* client_; }; + } // namespace meta } // namespace nebula diff --git a/src/meta/processors/admin/CreateSnapshotProcessor.cpp b/src/meta/processors/admin/CreateSnapshotProcessor.cpp index bc6fd1d14..a025472e1 100644 --- a/src/meta/processors/admin/CreateSnapshotProcessor.cpp +++ b/src/meta/processors/admin/CreateSnapshotProcessor.cpp @@ -15,14 +15,14 @@ namespace meta { void CreateSnapshotProcessor::process(const cpp2::CreateSnapshotReq&) { // check the index rebuild. not allowed to create snapshot when index rebuilding. 
- auto result = MetaServiceUtils::isIndexRebuilding(kvstore_); - if (result == folly::none) { - handleErrorCode(cpp2::ErrorCode::E_SNAPSHOT_FAILURE); + auto result = isIndexRebuilding(); + if (!nebula::ok(result)) { + handleErrorCode(nebula::error(result)); onFinished(); return; } - if (result.value()) { + if (nebula::value(result)) { LOG(ERROR) << "Index is rebuilding, not allowed to create snapshot."; handleErrorCode(cpp2::ErrorCode::E_SNAPSHOT_FAILURE); onFinished(); @@ -32,7 +32,14 @@ void CreateSnapshotProcessor::process(const cpp2::CreateSnapshotReq&) { auto snapshot = folly::format("SNAPSHOT_{}", MetaServiceUtils::genTimestampStr()).str(); folly::SharedMutex::WriteHolder wHolder(LockUtils::snapshotLock()); - auto hosts = ActiveHostsMan::getActiveHosts(kvstore_); + auto activeHostsRet = ActiveHostsMan::getActiveHosts(kvstore_); + if (!nebula::ok(activeHostsRet)) { + handleErrorCode(nebula::error(activeHostsRet)); + onFinished(); + return; + } + auto hosts = std::move(nebula::value(activeHostsRet)); + if (hosts.empty()) { LOG(ERROR) << "There is no active hosts"; handleErrorCode(cpp2::ErrorCode::E_NO_HOSTS); @@ -48,9 +55,9 @@ void CreateSnapshotProcessor::process(const cpp2::CreateSnapshotReq&) { NetworkUtils::toHostsStr(hosts))); auto putRet = doSyncPut(std::move(data)); - if (putRet != kvstore::ResultCode::SUCCEEDED) { + if (putRet != cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Write snapshot meta error"; - handleErrorCode(MetaCommon::to(putRet)); + handleErrorCode(putRet); onFinished(); return; } @@ -67,9 +74,9 @@ void CreateSnapshotProcessor::process(const cpp2::CreateSnapshotReq&) { // step 3 : Create checkpoint for all storage engines and meta engine. 
auto csRet = Snapshot::instance(kvstore_, client_)->createSnapshot(snapshot); - if (csRet.isLeftType()) { + if (!nebula::ok(csRet)) { LOG(ERROR) << "Checkpoint create error on storage engine"; - handleErrorCode(csRet.left()); + handleErrorCode(nebula::error(csRet)); cancelWriteBlocking(); onFinished(); return; @@ -99,11 +106,11 @@ void CreateSnapshotProcessor::process(const cpp2::CreateSnapshotReq&) { NetworkUtils::toHostsStr(hosts))); putRet = doSyncPut(std::move(data)); - if (putRet != kvstore::ResultCode::SUCCEEDED) { + if (putRet != cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "All checkpoint creations are done, " "but update checkpoint status error. " "snapshot : " << snapshot; - handleErrorCode(MetaCommon::to(putRet)); + handleErrorCode(putRet); } LOG(INFO) << "Create snapshot " << snapshot << " successfully"; diff --git a/src/meta/processors/admin/DropSnapshotProcessor.cpp b/src/meta/processors/admin/DropSnapshotProcessor.cpp index e65b26307..3b132c206 100644 --- a/src/meta/processors/admin/DropSnapshotProcessor.cpp +++ b/src/meta/processors/admin/DropSnapshotProcessor.cpp @@ -15,19 +15,19 @@ namespace meta { void DropSnapshotProcessor::process(const cpp2::DropSnapshotReq& req) { auto& snapshot = req.get_name(); folly::SharedMutex::WriteHolder wHolder(LockUtils::snapshotLock()); - std::string val; // Check snapshot is exists - auto ret = kvstore_->get(kDefaultSpaceId, kDefaultPartId, - MetaServiceUtils::snapshotKey(snapshot), - &val); - - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "No snapshots found"; - handleErrorCode(MetaCommon::to(ret)); + auto key = MetaServiceUtils::snapshotKey(snapshot); + auto ret = doGet(std::move(key)); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "Get snapshot " << snapshot << " failed, error " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto val = nebula::value(ret); auto hosts = MetaServiceUtils::parseSnapshotHosts(val); auto peersRet = 
NetworkUtils::toHosts(hosts); @@ -38,6 +38,7 @@ void DropSnapshotProcessor::process(const cpp2::DropSnapshotReq& req) { return; } + std::vector data; auto peers = peersRet.value(); auto dsRet = Snapshot::instance(kvstore_, client_)->dropSnapshot(snapshot, std::move(peers)); @@ -47,11 +48,11 @@ void DropSnapshotProcessor::process(const cpp2::DropSnapshotReq& req) { data.emplace_back(MetaServiceUtils::snapshotKey(snapshot), MetaServiceUtils::snapshotVal(cpp2::SnapshotStatus::INVALID, hosts)); auto putRet = doSyncPut(std::move(data)); - if (putRet != kvstore::ResultCode::SUCCEEDED) { + if (putRet != cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Update snapshot status error. " "snapshot : " << snapshot; - handleErrorCode(MetaCommon::to(putRet)); } + handleErrorCode(putRet); onFinished(); return; } @@ -64,11 +65,11 @@ void DropSnapshotProcessor::process(const cpp2::DropSnapshotReq& req) { data.emplace_back(MetaServiceUtils::snapshotKey(snapshot), MetaServiceUtils::snapshotVal(cpp2::SnapshotStatus::INVALID, hosts)); auto putRet = doSyncPut(std::move(data)); - if (putRet != kvstore::ResultCode::SUCCEEDED) { + if (putRet != cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Update snapshot status error. 
" "snapshot : " << snapshot; - handleErrorCode(MetaCommon::to(putRet)); } + handleErrorCode(putRet); onFinished(); return; } @@ -76,6 +77,7 @@ void DropSnapshotProcessor::process(const cpp2::DropSnapshotReq& req) { doRemove(MetaServiceUtils::snapshotKey(snapshot)); LOG(INFO) << "Drop snapshot " << snapshot << " successfully"; } + } // namespace meta } // namespace nebula diff --git a/src/meta/processors/admin/HBProcessor.cpp b/src/meta/processors/admin/HBProcessor.cpp index 520b2159a..c9ab62173 100644 --- a/src/meta/processors/admin/HBProcessor.cpp +++ b/src/meta/processors/admin/HBProcessor.cpp @@ -29,18 +29,19 @@ void HBProcessor::onFinished() { void HBProcessor::process(const cpp2::HBReq& req) { HostAddr host((*req.host_ref()).host, (*req.host_ref()).port); - if (FLAGS_hosts_whitelist_enabled - && hostExist(MetaServiceUtils::hostKey(host.host, host.port)) - == Status::HostNotFound()) { - LOG(INFO) << "Reject unregistered host " << host << "!"; - handleErrorCode(cpp2::ErrorCode::E_INVALID_HOST); - onFinished(); - return; + cpp2::ErrorCode ret; + if (FLAGS_hosts_whitelist_enabled) { + ret = hostExist(MetaServiceUtils::hostKey(host.host, host.port)); + if (ret != cpp2::ErrorCode::SUCCEEDED) { + LOG(INFO) << "Reject unregistered host " << host << "!"; + handleErrorCode(ret); + onFinished(); + return; + } } VLOG(3) << "Receive heartbeat from " << host << ", role = " << apache::thrift::util::enumNameSafe(req.get_role()); - auto ret = kvstore::ResultCode::SUCCEEDED; if (req.get_role() == cpp2::HostRole::STORAGE) { ClusterID peerCluserId = req.get_cluster_id(); if (peerCluserId == 0) { @@ -62,16 +63,20 @@ void HBProcessor::process(const cpp2::HBReq& req) { } else { ret = ActiveHostsMan::updateHostInfo(kvstore_, host, info); } - if (ret == kvstore::ResultCode::ERR_LEADER_CHANGED) { + if (ret == cpp2::ErrorCode::E_LEADER_CHANGED) { auto leaderRet = kvstore_->partLeader(kDefaultSpaceId, kDefaultPartId); if (nebula::ok(leaderRet)) { 
resp_.set_leader(toThriftHost(nebula::value(leaderRet))); } } - handleErrorCode(MetaCommon::to(ret)); - int64_t lastUpdateTime = LastUpdateTimeMan::get(this->kvstore_); - resp_.set_last_update_time_in_ms(lastUpdateTime); + auto lastUpdateTimeRet = LastUpdateTimeMan::get(kvstore_); + if (nebula::ok(lastUpdateTimeRet)) { + resp_.set_last_update_time_in_ms(nebula::value(lastUpdateTimeRet)); + } else if (nebula::error(lastUpdateTimeRet) == cpp2::ErrorCode::E_NOT_FOUND) { + resp_.set_last_update_time_in_ms(0); + } + handleErrorCode(ret); onFinished(); } diff --git a/src/meta/processors/admin/ListSnapshotsProcessor.cpp b/src/meta/processors/admin/ListSnapshotsProcessor.cpp index f1ccf2d03..31523c192 100644 --- a/src/meta/processors/admin/ListSnapshotsProcessor.cpp +++ b/src/meta/processors/admin/ListSnapshotsProcessor.cpp @@ -11,19 +11,24 @@ namespace nebula { namespace meta { void ListSnapshotsProcessor::process(const cpp2::ListSnapshotsReq&) { - auto prefix = MetaServiceUtils::snapshotPrefix(); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - handleErrorCode(MetaCommon::to(ret)); + const auto& prefix = MetaServiceUtils::snapshotPrefix(); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "Snapshot prefix failed, error: " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto iter = nebula::value(iterRet).get(); + std::vector snapshots; while (iter->valid()) { + auto val = iter->val(); auto name = MetaServiceUtils::parseSnapshotName(iter->key()); - auto status = MetaServiceUtils::parseSnapshotStatus(iter->val()); - auto hosts = MetaServiceUtils::parseSnapshotHosts(iter->val()); + auto status = MetaServiceUtils::parseSnapshotStatus(val); + auto hosts = MetaServiceUtils::parseSnapshotHosts(val); cpp2::Snapshot snapshot; snapshot.set_name(std::move(name)); 
snapshot.set_status(std::move(status)); @@ -31,9 +36,11 @@ void ListSnapshotsProcessor::process(const cpp2::ListSnapshotsReq&) { snapshots.emplace_back(std::move(snapshot)); iter->next(); } - resp_.set_code(cpp2::ErrorCode::SUCCEEDED); + + handleErrorCode(cpp2::ErrorCode::SUCCEEDED); resp_.set_snapshots(std::move(snapshots)); onFinished(); } + } // namespace meta } // namespace nebula diff --git a/src/meta/processors/admin/RestoreProcessor.cpp b/src/meta/processors/admin/RestoreProcessor.cpp index a739eb489..115697223 100644 --- a/src/meta/processors/admin/RestoreProcessor.cpp +++ b/src/meta/processors/admin/RestoreProcessor.cpp @@ -30,22 +30,36 @@ void RestoreProcessor::process(const cpp2::RestoreMetaReq& req) { auto replaceHosts = req.get_hosts(); if (!replaceHosts.empty()) { for (auto h : replaceHosts) { - auto result = MetaServiceUtils::replaceHostInPartition( - kvstore_, h.get_from_host(), h.get_to_host(), true); - if (!result) { + auto result = replaceHostInPartition(h.get_from_host(), h.get_to_host(), true); + if (!nebula::ok(result)) { LOG(ERROR) << "replaceHost in partition fails when recovered"; - handleErrorCode(cpp2::ErrorCode::E_RESTORE_FAILURE); + handleErrorCode(nebula::error(result)); onFinished(); return; + } else { + auto val = nebula::value(result); + if (!val) { + LOG(ERROR) << "replaceHost in partition fails when recovered"; + handleErrorCode(cpp2::ErrorCode::E_RESTORE_FAILURE); + onFinished(); + return; + } } - result = MetaServiceUtils::replaceHostInZone( - kvstore_, h.get_from_host(), h.get_to_host(), true); - if (!result) { - LOG(ERROR) << "replacehost in zone fails when recovered"; - handleErrorCode(cpp2::ErrorCode::E_RESTORE_FAILURE); + result = replaceHostInZone(h.get_from_host(), h.get_to_host(), true); + if (!nebula::ok(result)) { + LOG(ERROR) << "replaceHost in zone fails when recovered"; + handleErrorCode(nebula::error(result)); onFinished(); return; + } else { + auto val = nebula::value(result); + if (!val) { + LOG(ERROR) << 
"replaceHost in zone fails when recovered"; + handleErrorCode(cpp2::ErrorCode::E_RESTORE_FAILURE); + onFinished(); + return; + } } } } @@ -54,7 +68,9 @@ void RestoreProcessor::process(const cpp2::RestoreMetaReq& req) { unlink(f.c_str()); } + handleErrorCode(cpp2::ErrorCode::SUCCEEDED); onFinished(); } + } // namespace meta } // namespace nebula diff --git a/src/meta/processors/admin/SnapShot.cpp b/src/meta/processors/admin/SnapShot.cpp index 88d281e57..c24ffeaef 100644 --- a/src/meta/processors/admin/SnapShot.cpp +++ b/src/meta/processors/admin/SnapShot.cpp @@ -5,7 +5,7 @@ */ #include "meta/processors/admin/SnapShot.h" -#include +#include "meta/common/MetaCommon.h" #include "meta/processors/Common.h" #include "meta/ActiveHostsMan.h" #include "meta/MetaServiceUtils.h" @@ -13,14 +13,16 @@ namespace nebula { namespace meta { + ErrorOr>> Snapshot::createSnapshot(const std::string& name) { - auto retSpacesHosts = getSpacesHosts(); - if (!retSpacesHosts.ok()) { - return cpp2::ErrorCode::E_STORE_FAILURE; + auto retSpacesHostsRet = getSpacesHosts(); + if (!nebula::ok(retSpacesHostsRet)) { + return nebula::error(retSpacesHostsRet); } + auto spacesHosts = nebula::value(retSpacesHostsRet); std::unordered_map> info; - auto spacesHosts = retSpacesHosts.value(); + for (const auto& spaceHosts : spacesHosts) { for (const auto& host : spaceHosts.second) { auto status = client_->createSnapshot(spaceHosts.first, name, host).get(); @@ -36,11 +38,12 @@ Snapshot::createSnapshot(const std::string& name) { cpp2::ErrorCode Snapshot::dropSnapshot(const std::string& name, const std::vector& hosts) { - auto retSpacesHosts = getSpacesHosts(); - if (!retSpacesHosts.ok()) { - return cpp2::ErrorCode::E_STORE_FAILURE; + auto retSpacesHostsRet = getSpacesHosts(); + if (!nebula::ok(retSpacesHostsRet)) { + return nebula::error(retSpacesHostsRet); } - auto spacesHosts = retSpacesHosts.value(); + auto spacesHosts = nebula::value(retSpacesHostsRet); + for (const auto& spaceHosts : spacesHosts) { for 
(const auto& host : spaceHosts.second) { if (std::find(hosts.begin(), hosts.end(), host) != hosts.end()) { @@ -60,18 +63,19 @@ cpp2::ErrorCode Snapshot::dropSnapshot(const std::string& name, } cpp2::ErrorCode Snapshot::blockingWrites(storage::cpp2::EngineSignType sign) { - auto retSpacesHosts = getSpacesHosts(); - if (!retSpacesHosts.ok()) { - return cpp2::ErrorCode::E_STORE_FAILURE; + auto retSpacesHostsRet = getSpacesHosts(); + if (!nebula::ok(retSpacesHostsRet)) { + return nebula::error(retSpacesHostsRet); } - auto spacesHosts = retSpacesHosts.value(); + auto spacesHosts = nebula::value(retSpacesHostsRet); + auto ret = cpp2::ErrorCode::SUCCEEDED; for (const auto& spaceHosts : spacesHosts) { for (const auto& host : spaceHosts.second) { LOG(INFO) << "will block write host: " << host; auto status = client_->blockingWrites(spaceHosts.first, sign, host).get(); if (!status.ok()) { - LOG(ERROR) << " Send blocking sign error on host : " << host; + LOG(ERROR) << "Send blocking sign error on host : " << host; ret = cpp2::ErrorCode::E_BLOCK_WRITE_FAILURE; if (sign == storage::cpp2::EngineSignType::BLOCK_ON) { break; @@ -82,16 +86,21 @@ cpp2::ErrorCode Snapshot::blockingWrites(storage::cpp2::EngineSignType sign) { return ret; } -StatusOr>> Snapshot::getSpacesHosts() { +ErrorOr>> +Snapshot::getSpacesHosts() { folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - std::map> hostsByspaces; - auto prefix = MetaServiceUtils::partPrefix(); + const auto& prefix = MetaServiceUtils::partPrefix(); std::unique_ptr iter; auto kvRet = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); if (kvRet != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get hosts meta data error"; - return Status::Error("Get hosts meta data error"); + auto retCode = MetaCommon::to(kvRet); + LOG(ERROR) << "Get hosts meta data failed, error: " + << static_cast(retCode); + return retCode; } + + std::map> hostsByspaces; + for (; iter->valid(); iter->next()) { auto partHosts = 
MetaServiceUtils::parsePartVal(iter->val()); auto space = MetaServiceUtils::parsePartKeySpaceId(iter->key()); diff --git a/src/meta/processors/admin/SnapShot.h b/src/meta/processors/admin/SnapShot.h index 7d2a743a5..d510d450b 100644 --- a/src/meta/processors/admin/SnapShot.h +++ b/src/meta/processors/admin/SnapShot.h @@ -42,7 +42,7 @@ class Snapshot { executor_.reset(new folly::CPUThreadPoolExecutor(1)); } - StatusOr>> getSpacesHosts(); + ErrorOr>> getSpacesHosts(); private: kvstore::KVStore* kv_{nullptr}; diff --git a/src/meta/processors/configMan/GetConfigProcessor.cpp b/src/meta/processors/configMan/GetConfigProcessor.cpp index c37006e5b..e6b503209 100644 --- a/src/meta/processors/configMan/GetConfigProcessor.cpp +++ b/src/meta/processors/configMan/GetConfigProcessor.cpp @@ -13,39 +13,52 @@ void GetConfigProcessor::process(const cpp2::GetConfigReq& req) { auto module = req.get_item().get_module(); auto name = req.get_item().get_name(); std::vector items; + cpp2::ErrorCode code = cpp2::ErrorCode::SUCCEEDED; - { + do { folly::SharedMutex::ReadHolder rHolder(LockUtils::configLock()); if (module != cpp2::ConfigModule::ALL) { - getOneConfig(module, name, items); + code = getOneConfig(module, name, items); + if (code != cpp2::ErrorCode::SUCCEEDED) { + break; + } } else { - getOneConfig(cpp2::ConfigModule::GRAPH, name, items); - getOneConfig(cpp2::ConfigModule::STORAGE, name, items); + code = getOneConfig(cpp2::ConfigModule::GRAPH, name, items); + if (code != cpp2::ErrorCode::SUCCEEDED) { + break; + } + code = getOneConfig(cpp2::ConfigModule::STORAGE, name, items); + if (code != cpp2::ErrorCode::SUCCEEDED) { + break; + } } - } + } while (false); - if (items.empty()) { - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); - } else { - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); + if (code == cpp2::ErrorCode::SUCCEEDED) { resp_.set_items(std::move(items)); } + handleErrorCode(code); onFinished(); } -void GetConfigProcessor::getOneConfig(const cpp2::ConfigModule& module, - 
const std::string& name, - std::vector& items) { +cpp2::ErrorCode +GetConfigProcessor::getOneConfig(const cpp2::ConfigModule& module, + const std::string& name, + std::vector& items) { std::string configKey = MetaServiceUtils::configKey(module, name); auto ret = doGet(configKey); - if (!ret.ok()) { - return; + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "Get config " << name << " failed, error: " + << static_cast(retCode); + return retCode; } - cpp2::ConfigItem item = MetaServiceUtils::parseConfigValue(ret.value()); + cpp2::ConfigItem item = MetaServiceUtils::parseConfigValue(nebula::value(ret)); item.set_module(module); item.set_name(name); items.emplace_back(std::move(item)); + return cpp2::ErrorCode::SUCCEEDED; } } // namespace meta diff --git a/src/meta/processors/configMan/GetConfigProcessor.h b/src/meta/processors/configMan/GetConfigProcessor.h index d8f1b5488..76899fdfa 100644 --- a/src/meta/processors/configMan/GetConfigProcessor.h +++ b/src/meta/processors/configMan/GetConfigProcessor.h @@ -21,8 +21,9 @@ class GetConfigProcessor : public BaseProcessor { void process(const cpp2::GetConfigReq& req); private: - void getOneConfig(const cpp2::ConfigModule& module, const std::string& name, - std::vector& items); + cpp2::ErrorCode getOneConfig(const cpp2::ConfigModule& module, + const std::string& name, + std::vector& items); explicit GetConfigProcessor(kvstore::KVStore* kvstore) : BaseProcessor(kvstore) {} diff --git a/src/meta/processors/configMan/ListConfigsProcessor.cpp b/src/meta/processors/configMan/ListConfigsProcessor.cpp index e1b3b936f..1bb88145e 100644 --- a/src/meta/processors/configMan/ListConfigsProcessor.cpp +++ b/src/meta/processors/configMan/ListConfigsProcessor.cpp @@ -12,14 +12,16 @@ namespace meta { void ListConfigsProcessor::process(const cpp2::ListConfigsReq& req) { folly::SharedMutex::ReadHolder rHolder(LockUtils::configLock()); - auto prefix = MetaServiceUtils::configKeyPrefix(req.get_module()); - 
std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - handleErrorCode(MetaCommon::to(ret)); + const auto& prefix = MetaServiceUtils::configKeyPrefix(req.get_module()); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "List configs failed, error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto iter = nebula::value(iterRet).get(); std::vector items; while (iter->valid()) { diff --git a/src/meta/processors/configMan/RegConfigProcessor.cpp b/src/meta/processors/configMan/RegConfigProcessor.cpp index d8a4bdfcb..965646d60 100644 --- a/src/meta/processors/configMan/RegConfigProcessor.cpp +++ b/src/meta/processors/configMan/RegConfigProcessor.cpp @@ -25,9 +25,19 @@ void RegConfigProcessor::process(const cpp2::RegConfigReq& req) { std::string configKey = MetaServiceUtils::configKey(module, name); // ignore config which has been registered before - if (doGet(configKey).ok()) { + auto configRet = doGet(configKey); + if (nebula::ok(configRet)) { continue; + } else { + auto retCode = nebula::error(configRet); + if (retCode != cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Get config Failed, error: " << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } } + std::string configValue = MetaServiceUtils::configValue(mode, value); data.emplace_back(std::move(configKey), std::move(configValue)); } diff --git a/src/meta/processors/configMan/SetConfigProcessor.cpp b/src/meta/processors/configMan/SetConfigProcessor.cpp index 159358416..6c59d7564 100644 --- a/src/meta/processors/configMan/SetConfigProcessor.cpp +++ b/src/meta/processors/configMan/SetConfigProcessor.cpp @@ -53,11 +53,14 @@ cpp2::ErrorCode SetConfigProcessor::setConfig(const cpp2::ConfigModule& module, std::vector& data) { std::string configKey = MetaServiceUtils::configKey(module, name); 
auto ret = doGet(std::move(configKey)); - if (!ret.ok()) { - return cpp2::ErrorCode::E_NOT_FOUND; + if (!nebula::ok(ret)) { + auto retCode = nebula::error((ret)); + LOG(ERROR) << "Set config " << name << " failed, error " + << static_cast(retCode); + return retCode; } - cpp2::ConfigItem item = MetaServiceUtils::parseConfigValue(ret.value()); + cpp2::ConfigItem item = MetaServiceUtils::parseConfigValue(nebula::value(ret)); cpp2::ConfigMode curMode = item.get_mode(); if (curMode == cpp2::ConfigMode::IMMUTABLE) { return cpp2::ErrorCode::E_CONFIG_IMMUTABLE; diff --git a/src/meta/processors/customKV/GetProcessor.cpp b/src/meta/processors/customKV/GetProcessor.cpp index 821641858..f7bac98a8 100644 --- a/src/meta/processors/customKV/GetProcessor.cpp +++ b/src/meta/processors/customKV/GetProcessor.cpp @@ -13,14 +13,17 @@ namespace meta { void GetProcessor::process(const cpp2::GetReq& req) { auto key = MetaServiceUtils::assembleSegmentKey(req.get_segment(), req.get_key()); auto result = doGet(key); - if (!result.ok()) { - LOG(ERROR) << "Get Failed: " << key << " not found!"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(result)) { + auto retCode = nebula::error(result); + LOG(ERROR) << "Get Failed: " << key << " error: " + << static_cast(retCode);; + handleErrorCode(retCode); onFinished(); return; } + handleErrorCode(cpp2::ErrorCode::SUCCEEDED); - resp_.set_value(std::move(result.value())); + resp_.set_value(std::move(nebula::value(result))); onFinished(); } diff --git a/src/meta/processors/customKV/MultiGetProcessor.cpp b/src/meta/processors/customKV/MultiGetProcessor.cpp index 07bd86dc7..9922ec7eb 100644 --- a/src/meta/processors/customKV/MultiGetProcessor.cpp +++ b/src/meta/processors/customKV/MultiGetProcessor.cpp @@ -16,14 +16,16 @@ void MultiGetProcessor::process(const cpp2::MultiGetReq& req) { } auto result = doMultiGet(std::move(keys)); - if (!result.ok()) { - LOG(ERROR) << "MultiGet Failed: " << result.status(); - 
handleErrorCode(cpp2::ErrorCode::E_STORE_FAILURE); + if (!nebula::ok(result)) { + auto retCode = nebula::error(result); + LOG(ERROR) << "MultiGet Failed, error: " << static_cast(retCode);; + handleErrorCode(retCode); onFinished(); return; } + handleErrorCode(cpp2::ErrorCode::SUCCEEDED); - resp_.set_values(std::move(result.value())); + resp_.set_values(std::move(nebula::value(result))); onFinished(); } diff --git a/src/meta/processors/customKV/ScanProcessor.cpp b/src/meta/processors/customKV/ScanProcessor.cpp index a3434427d..84a6764e3 100644 --- a/src/meta/processors/customKV/ScanProcessor.cpp +++ b/src/meta/processors/customKV/ScanProcessor.cpp @@ -13,14 +13,16 @@ void ScanProcessor::process(const cpp2::ScanReq& req) { auto start = MetaServiceUtils::assembleSegmentKey(req.get_segment(), req.get_start()); auto end = MetaServiceUtils::assembleSegmentKey(req.get_segment(), req.get_end()); auto result = doScan(start, end); - if (!result.ok()) { - LOG(ERROR) << "Scan Failed: " << result.status(); - handleErrorCode(cpp2::ErrorCode::E_STORE_FAILURE); + if (!nebula::ok(result)) { + auto retCode = nebula::error(result); + LOG(ERROR) << "Scan Failed, error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + handleErrorCode(cpp2::ErrorCode::SUCCEEDED); - resp_.set_values(std::move(result.value())); + resp_.set_values(std::move(nebula::value(result))); onFinished(); } diff --git a/src/meta/processors/indexMan/CreateEdgeIndexProcessor.cpp b/src/meta/processors/indexMan/CreateEdgeIndexProcessor.cpp index 8ff3573b8..d4c0c153d 100644 --- a/src/meta/processors/indexMan/CreateEdgeIndexProcessor.cpp +++ b/src/meta/processors/indexMan/CreateEdgeIndexProcessor.cpp @@ -38,35 +38,49 @@ void CreateEdgeIndexProcessor::process(const cpp2::CreateEdgeIndexReq& req) { folly::SharedMutex::ReadHolder rHolder(LockUtils::snapshotLock()); folly::SharedMutex::WriteHolder wHolder(LockUtils::edgeIndexLock()); auto ret = getIndexID(space, indexName); - if (ret.ok()) { - 
LOG(ERROR) << "Create Edge Index Failed: " << indexName << " has existed"; + if (nebula::ok(ret)) { if (req.get_if_not_exists()) { - resp_.set_id(to(ret.value(), EntryType::INDEX)); handleErrorCode(cpp2::ErrorCode::SUCCEEDED); } else { + LOG(ERROR) << "Create Edge Index Failed: " << indexName << " has existed"; handleErrorCode(cpp2::ErrorCode::E_EXISTED); } + resp_.set_id(to(nebula::value(ret), EntryType::INDEX)); onFinished(); return; + } else { + auto retCode = nebula::error(ret); + if (retCode != cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Create Edge Index Failed, index name " << indexName << " error: " + << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } } auto edgeTypeRet = getEdgeType(space, edgeName); - if (!edgeTypeRet.ok()) { - LOG(ERROR) << "Create Edge Index Failed: " << edgeName << " not exist"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(edgeTypeRet)) { + auto retCode = nebula::error(edgeTypeRet); + LOG(ERROR) << "Create Edge Index Failed, Edge " << edgeName << " error: " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - - auto edgeType = edgeTypeRet.value(); - auto prefix = MetaServiceUtils::indexPrefix(space); - std::unique_ptr checkIter; - auto checkRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &checkIter); - if (checkRet != kvstore::ResultCode::SUCCEEDED) { - resp_.set_code(MetaCommon::to(checkRet)); + auto edgeType = nebula::value(edgeTypeRet); + + const auto& prefix = MetaServiceUtils::indexPrefix(space); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "Edge indexes prefix failed, space id " << space + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto checkIter = nebula::value(iterRet).get(); while (checkIter->valid()) { auto val = checkIter->val(); @@ -87,13 +101,16 @@ void CreateEdgeIndexProcessor::process(const 
cpp2::CreateEdgeIndexReq& req) { } auto schemaRet = getLatestEdgeSchema(space, edgeType); - if (!schemaRet.ok()) { - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(schemaRet)) { + auto retCode = nebula::error(schemaRet); + LOG(ERROR) << "Get edge schema failed, space id " << space << " edgeName " << edgeName + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - auto latestEdgeSchema = schemaRet.value(); + auto latestEdgeSchema = std::move(nebula::value(schemaRet)); if (tagOrEdgeHasTTL(latestEdgeSchema)) { LOG(ERROR) << "Edge: " << edgeName << " has ttl, not create index"; handleErrorCode(cpp2::ErrorCode::E_INDEX_WITH_TTL); diff --git a/src/meta/processors/indexMan/CreateTagIndexProcessor.cpp b/src/meta/processors/indexMan/CreateTagIndexProcessor.cpp index dc24746ee..2c0c54fbb 100644 --- a/src/meta/processors/indexMan/CreateTagIndexProcessor.cpp +++ b/src/meta/processors/indexMan/CreateTagIndexProcessor.cpp @@ -38,35 +38,49 @@ void CreateTagIndexProcessor::process(const cpp2::CreateTagIndexReq& req) { folly::SharedMutex::ReadHolder rHolder(LockUtils::snapshotLock()); folly::SharedMutex::WriteHolder wHolder(LockUtils::tagIndexLock()); auto ret = getIndexID(space, indexName); - if (ret.ok()) { - LOG(ERROR) << "Create Tag Index Failed: " << indexName << " has existed"; + if (nebula::ok(ret)) { if (req.get_if_not_exists()) { - resp_.set_id(to(ret.value(), EntryType::INDEX)); handleErrorCode(cpp2::ErrorCode::SUCCEEDED); } else { + LOG(ERROR) << "Create Tag Index Failed: " << indexName << " has existed"; handleErrorCode(cpp2::ErrorCode::E_EXISTED); } + resp_.set_id(to(nebula::value(ret), EntryType::INDEX)); onFinished(); return; + } else { + auto retCode = nebula::error(ret); + if (retCode != cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Create Tag Index Failed, index name " << indexName << " error: " + << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } } auto tagIDRet = 
getTagId(space, tagName); - if (!tagIDRet.ok()) { - LOG(ERROR) << "Create Tag Index Failed: Tag " << tagName << " not exist"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(tagIDRet)) { + auto retCode = nebula::error(tagIDRet); + LOG(ERROR) << "Create Tag Index Failed, Tag " << tagName << " error: " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - - auto tagID = tagIDRet.value(); - auto prefix = MetaServiceUtils::indexPrefix(space); - std::unique_ptr checkIter; - auto checkRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &checkIter); - if (checkRet != kvstore::ResultCode::SUCCEEDED) { - resp_.set_code(MetaCommon::to(checkRet)); + auto tagID = nebula::value(tagIDRet); + + const auto& prefix = MetaServiceUtils::indexPrefix(space); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "Tag indexes prefix failed, space id " << space + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto checkIter = nebula::value(iterRet).get(); while (checkIter->valid()) { auto val = checkIter->val(); @@ -87,13 +101,16 @@ void CreateTagIndexProcessor::process(const cpp2::CreateTagIndexReq& req) { } auto schemaRet = getLatestTagSchema(space, tagID); - if (!schemaRet.ok()) { - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(schemaRet)) { + auto retCode = nebula::error(schemaRet); + LOG(ERROR) << "Get tag schema failed, space id " << space << " tagName " << tagName + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - auto latestTagSchema = schemaRet.value(); + auto latestTagSchema = std::move(nebula::value(schemaRet)); if (tagOrEdgeHasTTL(latestTagSchema)) { LOG(ERROR) << "Tag: " << tagName << " has ttl, not create index"; handleErrorCode(cpp2::ErrorCode::E_INDEX_WITH_TTL); diff --git a/src/meta/processors/indexMan/DropEdgeIndexProcessor.cpp 
b/src/meta/processors/indexMan/DropEdgeIndexProcessor.cpp index c0cc30ed1..914c16147 100644 --- a/src/meta/processors/indexMan/DropEdgeIndexProcessor.cpp +++ b/src/meta/processors/indexMan/DropEdgeIndexProcessor.cpp @@ -11,28 +11,37 @@ namespace meta { void DropEdgeIndexProcessor::process(const cpp2::DropEdgeIndexReq& req) { auto spaceID = req.get_space_id(); - auto indexName = req.get_index_name(); + const auto& indexName = req.get_index_name(); CHECK_SPACE_ID_AND_RETURN(spaceID); folly::SharedMutex::WriteHolder wHolder(LockUtils::edgeIndexLock()); - auto edgeIndexID = getIndexID(spaceID, indexName); - if (!edgeIndexID.ok()) { - LOG(ERROR) << "Edge Index not exists in Space: " << spaceID << " Index name: " << indexName; - if (req.get_if_exists()) { - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); + auto edgeIndexIDRet = getIndexID(spaceID, indexName); + if (!nebula::ok(edgeIndexIDRet)) { + auto retCode = nebula::error(edgeIndexIDRet); + if (retCode == cpp2::ErrorCode::E_NOT_FOUND) { + if (req.get_if_exists()) { + retCode = cpp2::ErrorCode::SUCCEEDED; + } else { + LOG(ERROR) << "Drop Edge Index Failed, index name " << indexName + << " not exists in Space: "<< spaceID; + } } else { - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + LOG(ERROR) << "Drop Edge Index Failed, index name " << indexName + << " error: " << static_cast(retCode); } + handleErrorCode(retCode); onFinished(); return; } + auto edgeIndexID = nebula::value(edgeIndexIDRet); + std::vector keys; keys.emplace_back(MetaServiceUtils::indexIndexKey(spaceID, indexName)); - keys.emplace_back(MetaServiceUtils::indexKey(spaceID, edgeIndexID.value())); + keys.emplace_back(MetaServiceUtils::indexKey(spaceID, edgeIndexID)); LOG(INFO) << "Drop Edge Index " << indexName; - resp_.set_id(to(edgeIndexID.value(), EntryType::INDEX)); + resp_.set_id(to(edgeIndexID, EntryType::INDEX)); doSyncMultiRemoveAndUpdate(std::move(keys)); } diff --git a/src/meta/processors/indexMan/DropTagIndexProcessor.cpp 
b/src/meta/processors/indexMan/DropTagIndexProcessor.cpp index 128f191b4..c3b15aafb 100644 --- a/src/meta/processors/indexMan/DropTagIndexProcessor.cpp +++ b/src/meta/processors/indexMan/DropTagIndexProcessor.cpp @@ -11,28 +11,36 @@ namespace meta { void DropTagIndexProcessor::process(const cpp2::DropTagIndexReq& req) { auto spaceID = req.get_space_id(); - auto indexName = req.get_index_name(); + const auto& indexName = req.get_index_name(); CHECK_SPACE_ID_AND_RETURN(spaceID); folly::SharedMutex::WriteHolder wHolder(LockUtils::tagIndexLock()); - auto tagIndexID = getIndexID(spaceID, indexName); - if (!tagIndexID.ok()) { - LOG(ERROR) << "Tag Index not exists in Space: " << spaceID << " Index name: " << indexName; - if (req.get_if_exists()) { - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); + auto tagIndexIDRet = getIndexID(spaceID, indexName); + if (!nebula::ok(tagIndexIDRet)) { + auto retCode = nebula::error(tagIndexIDRet); + if (retCode == cpp2::ErrorCode::E_NOT_FOUND) { + if (req.get_if_exists()) { + retCode = cpp2::ErrorCode::SUCCEEDED; + } else { + LOG(ERROR) << "Drop Tag Index Failed, index name " << indexName + << " not exists in Space: "<< spaceID; + } } else { - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + LOG(ERROR) << "Drop Tag Index Failed, index name " << indexName + << " error: " << static_cast(retCode); } + handleErrorCode(retCode); onFinished(); return; } + auto tagIndexID = nebula::value(tagIndexIDRet); std::vector keys; keys.emplace_back(MetaServiceUtils::indexIndexKey(spaceID, indexName)); - keys.emplace_back(MetaServiceUtils::indexKey(spaceID, tagIndexID.value())); + keys.emplace_back(MetaServiceUtils::indexKey(spaceID, tagIndexID)); LOG(INFO) << "Drop Tag Index " << indexName; - resp_.set_id(to(tagIndexID.value(), EntryType::INDEX)); + resp_.set_id(to(tagIndexID, EntryType::INDEX)); doSyncMultiRemoveAndUpdate(std::move(keys)); } diff --git a/src/meta/processors/indexMan/FTServiceProcessor.cpp 
b/src/meta/processors/indexMan/FTServiceProcessor.cpp index a80e4a477..e8903f5b0 100644 --- a/src/meta/processors/indexMan/FTServiceProcessor.cpp +++ b/src/meta/processors/indexMan/FTServiceProcessor.cpp @@ -13,16 +13,25 @@ void SignInFTServiceProcessor::process(const cpp2::SignInFTServiceReq& req) { folly::SharedMutex::WriteHolder wHolder(LockUtils::fulltextServicesLock()); auto serviceKey = MetaServiceUtils::fulltextServiceKey(); auto ret = doGet(serviceKey); - if (ret.ok()) { + if (nebula::ok(ret)) { + LOG(ERROR) << "Fulltext already exists."; handleErrorCode(cpp2::ErrorCode::E_EXISTED); onFinished(); return; + } else { + auto retCode = nebula::error(ret); + if (retCode != cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Sign in fulltext failed, error: " + << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } } std::vector data; data.emplace_back(std::move(serviceKey), MetaServiceUtils::fulltextServiceVal(req.get_type(), req.get_clients())); - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); doSyncPutAndUpdate(std::move(data)); } @@ -30,26 +39,35 @@ void SignOutFTServiceProcessor::process(const cpp2::SignOutFTServiceReq&) { folly::SharedMutex::WriteHolder wHolder(LockUtils::fulltextServicesLock()); auto serviceKey = MetaServiceUtils::fulltextServiceKey(); auto ret = doGet(serviceKey); - if (!ret.ok()) { - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + if (retCode == cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Sign out fulltext failed, Fulltext not exists."; + } else { + LOG(ERROR) << "Sign out fulltext failed, error: " + << static_cast(retCode); + } + handleErrorCode(retCode); onFinished(); return; } - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); + doSyncMultiRemoveAndUpdate({std::move(serviceKey)}); } void ListFTClientsProcessor::process(const cpp2::ListFTClientsReq&) { folly::SharedMutex::WriteHolder rHolder(LockUtils::fulltextServicesLock()); - auto prefix = 
MetaServiceUtils::fulltextServiceKey(); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Can't find any full text service."; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + const auto& prefix = MetaServiceUtils::fulltextServiceKey(); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "List fulltext failed, error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + + auto iter = nebula::value(iterRet).get(); std::vector clients; if (iter->valid()) { clients = MetaServiceUtils::parseFTClients(iter->val()); diff --git a/src/meta/processors/indexMan/GetEdgeIndexProcessor.cpp b/src/meta/processors/indexMan/GetEdgeIndexProcessor.cpp index 98bc81ade..b59469250 100644 --- a/src/meta/processors/indexMan/GetEdgeIndexProcessor.cpp +++ b/src/meta/processors/indexMan/GetEdgeIndexProcessor.cpp @@ -14,27 +14,30 @@ void GetEdgeIndexProcessor::process(const cpp2::GetEdgeIndexReq& req) { CHECK_SPACE_ID_AND_RETURN(spaceID); auto indexName = req.get_index_name(); folly::SharedMutex::ReadHolder rHolder(LockUtils::edgeIndexLock()); - auto edgeIndexIDResult = getIndexID(spaceID, indexName); - if (!edgeIndexIDResult.ok()) { - LOG(ERROR) << "Get Edge Index SpaceID: " << spaceID - << " Index Name: " << indexName << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + auto edgeIndexIDRet = getIndexID(spaceID, indexName); + if (!nebula::ok(edgeIndexIDRet)) { + auto retCode = nebula::error(edgeIndexIDRet); + LOG(ERROR) << "Get Edge Index SpaceID: " << spaceID << " Index Name: " << indexName + << " failed, error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto indexId = nebula::value(edgeIndexIDRet); LOG(INFO) << "Get Edge Index SpaceID: " << spaceID << " Index Name: " << indexName; - auto edgeKey = 
MetaServiceUtils::indexKey(spaceID, edgeIndexIDResult.value()); - auto edgeResult = doGet(edgeKey); - if (!edgeResult.ok()) { - LOG(ERROR) << "Get Edge Index Failed: SpaceID " << spaceID - << " Index Name: " << indexName << " status: " << edgeResult.status(); - resp_.set_code(cpp2::ErrorCode::E_NOT_FOUND); + const auto& indexKey = MetaServiceUtils::indexKey(spaceID, indexId); + auto indexItemRet = doGet(indexKey); + if (!nebula::ok(indexItemRet)) { + auto retCode = nebula::error(indexItemRet); + LOG(ERROR) << "Get Edge Index Failed: SpaceID " << spaceID << " Index Name: " << indexName + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - auto item = MetaServiceUtils::parseIndex(edgeResult.value()); + auto item = MetaServiceUtils::parseIndex(nebula::value(indexItemRet)); if (item.get_schema_id().getType() != cpp2::SchemaID::Type::edge_type) { LOG(ERROR) << "Get Edge Index Failed: Index Name " << indexName << " is not EdgeIndex"; resp_.set_code(cpp2::ErrorCode::E_NOT_FOUND); diff --git a/src/meta/processors/indexMan/GetTagIndexProcessor.cpp b/src/meta/processors/indexMan/GetTagIndexProcessor.cpp index 224cc96fc..ce76809a6 100644 --- a/src/meta/processors/indexMan/GetTagIndexProcessor.cpp +++ b/src/meta/processors/indexMan/GetTagIndexProcessor.cpp @@ -11,31 +11,34 @@ namespace meta { void GetTagIndexProcessor::process(const cpp2::GetTagIndexReq& req) { auto spaceID = req.get_space_id(); - auto indexName = req.get_index_name(); + const auto& indexName = req.get_index_name(); CHECK_SPACE_ID_AND_RETURN(spaceID); folly::SharedMutex::ReadHolder rHolder(LockUtils::tagIndexLock()); - auto tagIndexIDResult = getIndexID(spaceID, indexName); - if (!tagIndexIDResult.ok()) { - LOG(ERROR) << "Get Tag Index SpaceID: " << spaceID - << " Index Name: " << indexName << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + auto tagIndexIDRet = getIndexID(spaceID, indexName); + if (!nebula::ok(tagIndexIDRet)) { + auto retCode = 
nebula::error(tagIndexIDRet); + LOG(ERROR) << "Get Tag Index SpaceID: " << spaceID << " Index Name: " << indexName + << " failed, error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto indexId = nebula::value(tagIndexIDRet); LOG(INFO) << "Get Tag Index SpaceID: " << spaceID << " Index Name: " << indexName; - auto tagKey = MetaServiceUtils::indexKey(spaceID, tagIndexIDResult.value()); - auto tagResult = doGet(tagKey); - if (!tagResult.ok()) { - LOG(ERROR) << "Get Tag Index Failed: SpaceID " << spaceID - << " Index Name: " << indexName << " status: " << tagResult.status(); - resp_.set_code(cpp2::ErrorCode::E_NOT_FOUND); + const auto& indexKey = MetaServiceUtils::indexKey(spaceID, indexId); + auto indexItemRet = doGet(indexKey); + if (!nebula::ok(indexItemRet)) { + auto retCode = nebula::error(indexItemRet); + LOG(ERROR) << "Get Tag Index Failed: SpaceID " << spaceID << " Index Name: " << indexName + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - auto item = MetaServiceUtils::parseIndex(tagResult.value()); + auto item = MetaServiceUtils::parseIndex(nebula::value(indexItemRet)); if (item.get_schema_id().getType() != cpp2::SchemaID::Type::tag_id) { LOG(ERROR) << "Get Tag Index Failed: Index Name " << indexName << " is not TagIndex"; resp_.set_code(cpp2::ErrorCode::E_NOT_FOUND); diff --git a/src/meta/processors/indexMan/ListEdgeIndexesProcessor.cpp b/src/meta/processors/indexMan/ListEdgeIndexesProcessor.cpp index 2b4bc23f7..89f1af0dc 100644 --- a/src/meta/processors/indexMan/ListEdgeIndexesProcessor.cpp +++ b/src/meta/processors/indexMan/ListEdgeIndexesProcessor.cpp @@ -10,21 +10,23 @@ namespace nebula { namespace meta { void ListEdgeIndexesProcessor::process(const cpp2::ListEdgeIndexesReq& req) { - CHECK_SPACE_ID_AND_RETURN(req.get_space_id()); - folly::SharedMutex::ReadHolder rHolder(LockUtils::edgeIndexLock()); auto space = req.get_space_id(); - auto prefix = 
MetaServiceUtils::indexPrefix(space); + CHECK_SPACE_ID_AND_RETURN(space); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - handleErrorCode(MetaCommon::to(ret)); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "List Edge Index Failed: SpaceID " << space; + folly::SharedMutex::ReadHolder rHolder(LockUtils::edgeIndexLock()); + const auto& prefix = MetaServiceUtils::indexPrefix(space); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "List Edge Index Failed: SpaceID " << space + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - std::vector items; + auto iter = nebula::value(iterRet).get(); + std::vector items; while (iter->valid()) { auto val = iter->val(); auto item = MetaServiceUtils::parseIndex(val); @@ -33,7 +35,7 @@ void ListEdgeIndexesProcessor::process(const cpp2::ListEdgeIndexesReq& req) { } iter->next(); } - resp_.set_code(cpp2::ErrorCode::SUCCEEDED); + handleErrorCode(cpp2::ErrorCode::SUCCEEDED); resp_.set_items(std::move(items)); onFinished(); } diff --git a/src/meta/processors/indexMan/ListTagIndexesProcessor.cpp b/src/meta/processors/indexMan/ListTagIndexesProcessor.cpp index 94dbe682c..7ecc7005a 100644 --- a/src/meta/processors/indexMan/ListTagIndexesProcessor.cpp +++ b/src/meta/processors/indexMan/ListTagIndexesProcessor.cpp @@ -12,19 +12,21 @@ namespace meta { void ListTagIndexesProcessor::process(const cpp2::ListTagIndexesReq& req) { auto space = req.get_space_id(); CHECK_SPACE_ID_AND_RETURN(space); - folly::SharedMutex::ReadHolder rHolder(LockUtils::tagIndexLock()); - auto prefix = MetaServiceUtils::indexPrefix(space); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - handleErrorCode(MetaCommon::to(ret)); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "List Tag Index Failed: SpaceID " << space; + 
folly::SharedMutex::ReadHolder rHolder(LockUtils::tagIndexLock()); + const auto& prefix = MetaServiceUtils::indexPrefix(space); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "List Tag Index Failed: SpaceID " << space + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - std::vector items; + auto iter = nebula::value(iterRet).get(); + std::vector items; while (iter->valid()) { auto val = iter->val(); auto item = MetaServiceUtils::parseIndex(val); @@ -33,7 +35,7 @@ void ListTagIndexesProcessor::process(const cpp2::ListTagIndexesReq& req) { } iter->next(); } - resp_.set_code(cpp2::ErrorCode::SUCCEEDED); + handleErrorCode(cpp2::ErrorCode::SUCCEEDED); resp_.set_items(std::move(items)); onFinished(); } diff --git a/src/meta/processors/jobMan/AdminJobProcessor.cpp b/src/meta/processors/jobMan/AdminJobProcessor.cpp index 416f02064..8633d431a 100644 --- a/src/meta/processors/jobMan/AdminJobProcessor.cpp +++ b/src/meta/processors/jobMan/AdminJobProcessor.cpp @@ -32,9 +32,9 @@ void AdminJobProcessor::process(const cpp2::AdminJobReq& req) { { auto cmd = req.get_cmd(); auto paras = req.get_paras(); - if (cmd == nebula::meta::cpp2::AdminCmd::REBUILD_TAG_INDEX || - cmd == nebula::meta::cpp2::AdminCmd::REBUILD_EDGE_INDEX || - cmd == nebula::meta::cpp2::AdminCmd::STATS) { + if (cmd == cpp2::AdminCmd::REBUILD_TAG_INDEX || + cmd == cpp2::AdminCmd::REBUILD_EDGE_INDEX || + cmd == cpp2::AdminCmd::STATS) { if (paras.empty()) { LOG(ERROR) << "Parameter should be not empty"; errorCode = cpp2::ErrorCode::E_INVALID_PARM; diff --git a/src/meta/processors/jobMan/JobDescription.cpp b/src/meta/processors/jobMan/JobDescription.cpp index d5915309b..63d5665fc 100644 --- a/src/meta/processors/jobMan/JobDescription.cpp +++ b/src/meta/processors/jobMan/JobDescription.cpp @@ -9,6 +9,8 @@ #include #include #include +#include "meta/common/MetaCommon.h" +#include "meta/processors/Common.h" 
#include "meta/processors/jobMan/JobUtils.h" #include "meta/processors/jobMan/JobDescription.h" #include "kvstore/KVIterator.h" @@ -35,18 +37,18 @@ JobDescription::JobDescription(JobID id, startTime_(startTime), stopTime_(stopTime) {} -folly::Optional +ErrorOr JobDescription::makeJobDescription(folly::StringPiece rawkey, folly::StringPiece rawval) { try { if (!isJobKey(rawkey)) { - return folly::none; + return cpp2::ErrorCode::E_INVALID_JOB; } auto key = parseKey(rawkey); if (!isSupportedValue(rawval)) { LOG(ERROR) << "not supported data ver of job " << key; - return folly::none; + return cpp2::ErrorCode::E_INVALID_JOB; } auto tup = parseVal(rawval); @@ -62,7 +64,7 @@ JobDescription::makeJobDescription(folly::StringPiece rawkey, } catch(std::exception& ex) { LOG(ERROR) << ex.what(); } - return folly::none; + return cpp2::ErrorCode::E_INVALID_JOB; } std::string JobDescription::jobKey() const { @@ -179,14 +181,15 @@ bool JobDescription::isJobKey(const folly::StringPiece& rawKey) { return rawKey.size() == JobUtil::jobPrefix().length() + sizeof(int32_t); } -folly::Optional +ErrorOr JobDescription::loadJobDescription(JobID iJob, nebula::kvstore::KVStore* kv) { auto key = makeJobKey(iJob); std::string val; - auto rc = kv->get(0, 0, key, &val); + auto rc = kv->get(kDefaultSpaceId, kDefaultPartId, key, &val); if (rc != nebula::kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Loading Job Description Failed"; - return folly::none; + auto retCode = MetaCommon::to(rc); + LOG(ERROR) << "Loading Job Description Failed" << static_cast(retCode); + return retCode; } return makeJobDescription(key, val); } diff --git a/src/meta/processors/jobMan/JobDescription.h b/src/meta/processors/jobMan/JobDescription.h index a4e422e6b..4fd180df6 100644 --- a/src/meta/processors/jobMan/JobDescription.h +++ b/src/meta/processors/jobMan/JobDescription.h @@ -48,7 +48,7 @@ class JobDescription { /* * return the JobDescription if both key & val is valid * */ - static folly::Optional + static ErrorOr 
makeJobDescription(folly::StringPiece key, folly::StringPiece val); JobID getJobId() const { return id_; } @@ -96,7 +96,7 @@ class JobDescription { /* * get a existed job from kvstore, return folly::none if there isn't * */ - static folly::Optional + static ErrorOr loadJobDescription(JobID iJob, nebula::kvstore::KVStore* kv); /* diff --git a/src/meta/processors/jobMan/JobManager.cpp b/src/meta/processors/jobMan/JobManager.cpp index c3a84198a..38ec719bb 100644 --- a/src/meta/processors/jobMan/JobManager.cpp +++ b/src/meta/processors/jobMan/JobManager.cpp @@ -13,6 +13,7 @@ #include #include "kvstore/Common.h" #include "kvstore/KVIterator.h" +#include "meta/common/MetaCommon.h" #include "meta/processors/Common.h" #include "meta/processors/admin/AdminClient.h" #include "meta/processors/jobMan/JobManager.h" @@ -84,16 +85,17 @@ void JobManager::scheduleThread() { usleep(FLAGS_job_check_intervals); } - auto jobDesc = JobDescription::loadJobDescription(iJob, kvStore_); - if (jobDesc == folly::none) { + auto jobDescRet = JobDescription::loadJobDescription(iJob, kvStore_); + if (!nebula::ok(jobDescRet)) { LOG(ERROR) << "[JobManager] load an invalid job from queue " << iJob; continue; // leader change or archive happend } - if (!jobDesc->setStatus(cpp2::JobStatus::RUNNING)) { + auto jobDesc = nebula::value(jobDescRet); + if (!jobDesc.setStatus(cpp2::JobStatus::RUNNING)) { LOG(INFO) << "[JobManager] skip job " << iJob; continue; } - save(jobDesc->jobKey(), jobDesc->jobVal()); + save(jobDesc.jobKey(), jobDesc.jobVal()); { std::lock_guard lk(statusGuard_); if (status_ == JbmgrStatus::IDLE) { @@ -101,7 +103,7 @@ void JobManager::scheduleThread() { } } - if (!runJobInternal(*jobDesc)) { + if (!runJobInternal(jobDesc)) { jobFinished(iJob, cpp2::JobStatus::FAILED); } } @@ -154,8 +156,8 @@ cpp2::ErrorCode JobManager::jobFinished(JobID jobId, cpp2::JobStatus jobStatus) SCOPE_EXIT { cleanJob(jobId); }; - auto optJobDesc = JobDescription::loadJobDescription(jobId, kvStore_); - if 
(!optJobDesc) { + auto optJobDescRet = JobDescription::loadJobDescription(jobId, kvStore_); + if (!nebula::ok(optJobDescRet)) { LOG(WARNING) << folly::sformat("can't load job, jobId={}", jobId); if (jobStatus != cpp2::JobStatus::STOPPED) { // there is a rare condition, that when job finished, @@ -166,10 +168,12 @@ cpp2::ErrorCode JobManager::jobFinished(JobID jobId, cpp2::JobStatus jobStatus) status_ = JbmgrStatus::IDLE; } } - return cpp2::ErrorCode::E_NOT_FOUND; + return nebula::error(optJobDescRet); } - if (!optJobDesc->setStatus(jobStatus)) { + auto optJobDesc = nebula::value(optJobDescRet); + + if (!optJobDesc.setStatus(jobStatus)) { // job already been set as finished, failed or stopped return cpp2::ErrorCode::E_SAVE_JOB_FAILURE; } @@ -179,24 +183,29 @@ cpp2::ErrorCode JobManager::jobFinished(JobID jobId, cpp2::JobStatus jobStatus) status_ = JbmgrStatus::IDLE; } } - auto rc = save(optJobDesc->jobKey(), optJobDesc->jobVal()); - if (rc == nebula::kvstore::ResultCode::ERR_LEADER_CHANGED) { - return cpp2::ErrorCode::E_LEADER_CHANGED; - } else if (rc != nebula::kvstore::ResultCode::SUCCEEDED) { - return cpp2::ErrorCode::E_UNKNOWN; + auto rc = save(optJobDesc.jobKey(), optJobDesc.jobVal()); + if (rc != cpp2::ErrorCode::SUCCEEDED) { + return rc; } auto jobExec = - MetaJobExecutorFactory::createMetaJobExecutor(*optJobDesc, kvStore_, adminClient_); + MetaJobExecutorFactory::createMetaJobExecutor(optJobDesc, kvStore_, adminClient_); if (!jobExec) { LOG(WARNING) << folly::sformat("unable to create jobExecutor, jobId={}", jobId); return cpp2::ErrorCode::E_UNKNOWN; } - if (!optJobDesc->getParas().empty()) { - auto spaceName = optJobDesc->getParas().back(); - auto spaceId = getSpaceId(spaceName); - LOG(INFO) << folly::sformat("spaceName={}, spaceId={}", spaceName, spaceId); + if (!optJobDesc.getParas().empty()) { + auto spaceName = optJobDesc.getParas().back(); + auto spaceIdRet = getSpaceId(spaceName); + if (!nebula::ok(spaceIdRet)) { + auto retCode = 
nebula::error(spaceIdRet); + LOG(INFO) << "Get spaceName "<< spaceName << " failed, error: " + << static_cast(retCode); + return retCode; + } + + auto spaceId = nebula::value(spaceIdRet); if (spaceId == -1) { return cpp2::ErrorCode::E_STORE_FAILURE; } @@ -219,23 +228,33 @@ cpp2::ErrorCode JobManager::saveTaskStatus(TaskDescription& td, td.setStatus(status); auto jobId = req.get_job_id(); - auto optJobDesc = JobDescription::loadJobDescription(jobId, kvStore_); - if (!optJobDesc) { - LOG(WARNING) << folly::sformat("{}() loadJobDesc failed, jobId={}", __func__, jobId); - return cpp2::ErrorCode::E_TASK_REPORT_OUT_DATE; + auto optJobDescRet = JobDescription::loadJobDescription(jobId, kvStore_); + if (!nebula::ok(optJobDescRet)) { + auto retCode = nebula::error(optJobDescRet); + LOG(WARNING) << "LoadJobDesc failed, jobId " << jobId << " error: " + << static_cast(retCode); + return retCode; } + auto optJobDesc = nebula::value(optJobDescRet); auto jobExec = - MetaJobExecutorFactory::createMetaJobExecutor(*optJobDesc, kvStore_, adminClient_); + MetaJobExecutorFactory::createMetaJobExecutor(optJobDesc, kvStore_, adminClient_); if (!jobExec) { LOG(WARNING) << folly::sformat("createMetaJobExecutor failed(), jobId={}", jobId); return cpp2::ErrorCode::E_TASK_REPORT_OUT_DATE; } else { - if (!optJobDesc->getParas().empty()) { - auto spaceName = optJobDesc->getParas().back(); - auto spaceId = getSpaceId(spaceName); - LOG(INFO) << folly::sformat("spaceName={}, spaceId={}", spaceName, spaceId); + if (!optJobDesc.getParas().empty()) { + auto spaceName = optJobDesc.getParas().back(); + auto spaceIdRet = getSpaceId(spaceName); + if (!nebula::ok(spaceIdRet)) { + auto retCode = nebula::error(spaceIdRet); + LOG(INFO) << "Get spaceName "<< spaceName << " failed, error: " + << static_cast(retCode); + return retCode; + } + + auto spaceId = nebula::value(spaceIdRet); if (spaceId != -1) { jobExec->setSpaceId(spaceId); } @@ -243,8 +262,8 @@ cpp2::ErrorCode 
JobManager::saveTaskStatus(TaskDescription& td, } auto rcSave = save(td.taskKey(), td.taskVal()); - if (rcSave != kvstore::ResultCode::SUCCEEDED) { - return cpp2::ErrorCode::E_STORE_FAILURE; + if (rcSave != cpp2::ErrorCode::SUCCEEDED) { + return rcSave; } return jobExec->saveSpecialTaskStatus(req); } @@ -268,7 +287,11 @@ cpp2::ErrorCode JobManager::reportTaskFinish(const cpp2::ReportTaskReq& req) { // bacause the last task will update the job's status // tasks shoule report once a time std::lock_guard lk(muReportFinish_); - auto tasks = getAllTasks(jobId); + auto tasksRet = getAllTasks(jobId); + if (!nebula::ok(tasksRet)) { + return nebula::error(tasksRet); + } + auto tasks = nebula::value(tasksRet); auto task = std::find_if(tasks.begin(), tasks.end(), [&](auto& it){ return it.getJobId() == jobId && it.getTaskId() == taskId; }); @@ -298,13 +321,14 @@ cpp2::ErrorCode JobManager::reportTaskFinish(const cpp2::ReportTaskReq& req) { return cpp2::ErrorCode::SUCCEEDED; } -std::list JobManager::getAllTasks(JobID jobId) { +ErrorOr> +JobManager::getAllTasks(JobID jobId) { std::list taskDescriptions; auto jobKey = JobDescription::makeJobKey(jobId); std::unique_ptr iter; ResultCode rc = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, jobKey, &iter); if (rc != nebula::kvstore::ResultCode::SUCCEEDED) { - return taskDescriptions; + return MetaCommon::to(rc); } for (; iter->valid(); iter->next()) { if (JobDescription::isJobKey(iter->key())) { @@ -317,14 +341,14 @@ std::list JobManager::getAllTasks(JobID jobId) { cpp2::ErrorCode JobManager::addJob(const JobDescription& jobDesc, AdminClient* client) { auto rc = save(jobDesc.jobKey(), jobDesc.jobVal()); - if (rc == nebula::kvstore::ResultCode::SUCCEEDED) { + if (rc == cpp2::ErrorCode::SUCCEEDED) { auto jobId = jobDesc.getJobId(); enqueue(jobId, jobDesc.getCmd()); // Add job to jobMap inFlightJobs_.emplace(jobId, jobDesc); } else { LOG(ERROR) << "Add Job Failed"; - return cpp2::ErrorCode::E_ADD_JOB_FAILURE; + return rc; } 
adminClient_ = client; return cpp2::ErrorCode::SUCCEEDED; @@ -357,40 +381,45 @@ JobManager::showJobs() { auto rc = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter); if (rc != nebula::kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Fetch Jobs Failed"; - return cpp2::ErrorCode::E_STORE_FAILURE; + auto retCode = MetaCommon::to(rc); + LOG(ERROR) << "Fetch Jobs Failed, error: " << static_cast(retCode); + return retCode; } int32_t lastExpiredJobId = INT_MIN; std::vector expiredJobKeys; std::vector ret; + for (; iter->valid(); iter->next()) { - if (JobDescription::isJobKey(iter->key())) { - auto optJob = JobDescription::makeJobDescription(iter->key(), iter->val()); - if (optJob == folly::none) { - expiredJobKeys.emplace_back(iter->key()); + auto jobKey = iter->key(); + if (JobDescription::isJobKey(jobKey)) { + auto optJobRet = JobDescription::makeJobDescription(jobKey, iter->val()); + if (!nebula::ok(optJobRet)) { + expiredJobKeys.emplace_back(jobKey); continue; } + auto optJob = nebula::value(optJobRet); // skip expired job, default 1 week - auto jobDesc = optJob->toJobDesc(); + auto jobDesc = optJob.toJobDesc(); if (isExpiredJob(jobDesc)) { lastExpiredJobId = jobDesc.get_id(); LOG(INFO) << "remove expired job " << lastExpiredJobId; - expiredJobKeys.emplace_back(iter->key()); + expiredJobKeys.emplace_back(jobKey); continue; } ret.emplace_back(jobDesc); } else { // iter-key() is a TaskKey - TaskDescription task(iter->key(), iter->val()); + TaskDescription task(jobKey, iter->val()); if (task.getJobId() == lastExpiredJobId) { - expiredJobKeys.emplace_back(iter->key()); + expiredJobKeys.emplace_back(jobKey); } } } - if (!removeExpiredJobs(std::move(expiredJobKeys))) { + auto retCode = removeExpiredJobs(std::move(expiredJobKeys)); + if (retCode != cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Remove Expired Jobs Failed"; - return cpp2::ErrorCode::E_STORE_FAILURE; + return retCode; } std::sort(ret.begin(), ret.end(), [](const auto& a, const 
auto& b) { @@ -409,19 +438,19 @@ bool JobManager::isExpiredJob(const cpp2::JobDesc& jobDesc) { return duration > FLAGS_job_expired_secs; } -bool JobManager::removeExpiredJobs(std::vector&& expiredJobsAndTasks) { - bool result = true; +cpp2::ErrorCode JobManager::removeExpiredJobs(std::vector&& expiredJobsAndTasks) { + kvstore::ResultCode ret; folly::Baton baton; kvStore_->asyncMultiRemove(kDefaultSpaceId, kDefaultPartId, std::move(expiredJobsAndTasks), [&](nebula::kvstore::ResultCode code) { if (code != kvstore::ResultCode::SUCCEEDED) { - result = false; LOG(ERROR) << "kvstore asyncRemoveRange failed: " << code; } + ret = code; baton.post(); }); baton.wait(); - return result; + return MetaCommon::to(ret); } bool JobManager::checkJobExist(const cpp2::AdminCmd& cmd, @@ -446,7 +475,7 @@ JobManager::showJob(JobID iJob) { std::unique_ptr iter; auto rc = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, jobKey, &iter); if (rc != nebula::kvstore::ResultCode::SUCCEEDED) { - return cpp2::ErrorCode::E_STORE_FAILURE; + return MetaCommon::to(rc); } if (!iter->valid()) { @@ -455,13 +484,16 @@ JobManager::showJob(JobID iJob) { std::pair> ret; for (; iter->valid(); iter->next()) { - if (JobDescription::isJobKey(iter->key())) { - auto optJob = JobDescription::makeJobDescription(iter->key(), iter->val()); - if (optJob != folly::none) { - ret.first = optJob->toJobDesc(); + auto jKey = iter->key(); + if (JobDescription::isJobKey(jKey)) { + auto optJobRet = JobDescription::makeJobDescription(jKey, iter->val()); + if (!nebula::ok(optJobRet)) { + return nebula::error(optJobRet); } + auto optJob = nebula::value(optJobRet); + ret.first = optJob.toJobDesc(); } else { - TaskDescription td(iter->key(), iter->val()); + TaskDescription td(jKey, iter->val()); ret.second.emplace_back(td.toTaskDesc()); } } @@ -481,25 +513,27 @@ ErrorOr JobManager::recoverJob() { std::unique_ptr iter; auto rc = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter); if (rc != 
nebula::kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Can't find jobs"; - return cpp2::ErrorCode::E_NOT_FOUND; + auto retCode = MetaCommon::to(rc); + LOG(ERROR) << "Can't find jobs, error: " << static_cast(retCode); + return retCode; } for (; iter->valid(); iter->next()) { if (!JobDescription::isJobKey(iter->key())) { continue; } - auto optJob = JobDescription::makeJobDescription(iter->key(), iter->val()); - if (optJob != folly::none) { - if (optJob->getStatus() == cpp2::JobStatus::QUEUE) { + auto optJobRet = JobDescription::makeJobDescription(iter->key(), iter->val()); + if (nebula::ok(optJobRet)) { + auto optJob = nebula::value(optJobRet); + if (optJob.getStatus() == cpp2::JobStatus::QUEUE) { // Check if the job exists JobID jId = 0; - auto jobExist = checkJobExist(optJob->getCmd(), optJob->getParas(), jId); + auto jobExist = checkJobExist(optJob.getCmd(), optJob.getParas(), jId); if (!jobExist) { - auto jobId = optJob->getJobId(); - enqueue(jobId, optJob->getCmd()); - inFlightJobs_.emplace(jobId, *optJob); + auto jobId = optJob.getJobId(); + enqueue(jobId, optJob.getCmd()); + inFlightJobs_.emplace(jobId, optJob); ++recoveredJobNum; } } @@ -508,7 +542,7 @@ ErrorOr JobManager::recoverJob() { return recoveredJobNum; } -ResultCode JobManager::save(const std::string& k, const std::string& v) { +cpp2::ErrorCode JobManager::save(const std::string& k, const std::string& v) { std::vector data{std::make_pair(k, v)}; folly::Baton baton; auto rc = nebula::kvstore::ResultCode::SUCCEEDED; @@ -518,16 +552,17 @@ ResultCode JobManager::save(const std::string& k, const std::string& v) { baton.post(); }); baton.wait(); - return rc; + return MetaCommon::to(rc); } -GraphSpaceID JobManager::getSpaceId(const std::string& name) { +ErrorOr JobManager::getSpaceId(const std::string& name) { auto indexKey = MetaServiceUtils::indexSpaceKey(name); std::string val; auto ret = kvStore_->get(kDefaultSpaceId, kDefaultPartId, indexKey, &val); if (ret != kvstore::ResultCode::SUCCEEDED) { - 
LOG(ERROR) << "KVStore error: " << ret;; - return -1; + auto retCode = MetaCommon::to(ret); + LOG(ERROR) << "KVStore error: " << static_cast(retCode);; + return retCode; } return *reinterpret_cast(val.c_str()); } diff --git a/src/meta/processors/jobMan/JobManager.h b/src/meta/processors/jobMan/JobManager.h index b4789b672..8a38bfc1a 100644 --- a/src/meta/processors/jobMan/JobManager.h +++ b/src/meta/processors/jobMan/JobManager.h @@ -110,15 +110,15 @@ class JobManager : public nebula::cpp::NonCopyable, public nebula::cpp::NonMovab bool runJobInternal(const JobDescription& jobDesc); bool runJobInternalOld(const JobDescription& jobDesc); - GraphSpaceID getSpaceId(const std::string& name); + ErrorOr getSpaceId(const std::string& name); - kvstore::ResultCode save(const std::string& k, const std::string& v); + cpp2::ErrorCode save(const std::string& k, const std::string& v); static bool isExpiredJob(const cpp2::JobDesc& jobDesc); - bool removeExpiredJobs(std::vector&& jobKeys); + cpp2::ErrorCode removeExpiredJobs(std::vector&& jobKeys); - std::list getAllTasks(JobID jobId); + ErrorOr> getAllTasks(JobID jobId); void cleanJob(JobID jobId); diff --git a/src/meta/processors/jobMan/ListEdgeIndexStatusProcessor.cpp b/src/meta/processors/jobMan/ListEdgeIndexStatusProcessor.cpp index 48345f8ad..68db62ba8 100644 --- a/src/meta/processors/jobMan/ListEdgeIndexStatusProcessor.cpp +++ b/src/meta/processors/jobMan/ListEdgeIndexStatusProcessor.cpp @@ -15,30 +15,38 @@ void ListEdgeIndexStatusProcessor::process(const cpp2::ListIndexStatusReq& req) std::unique_ptr iter; auto rc = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter); if (rc != nebula::kvstore::ResultCode::SUCCEEDED) { - handleErrorCode(cpp2::ErrorCode::E_STORE_FAILURE); + auto retCode = MetaCommon::to(rc); + LOG(ERROR) << "Loading Job Failed" << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } std::vector jobs; - std::vector statuses; + std::vector statuses; for (; 
iter->valid(); iter->next()) { if (JobDescription::isJobKey(iter->key())) { - auto optJob = JobDescription::makeJobDescription(iter->key(), iter->val()); - if (optJob == folly::none) { + auto optJobRet = JobDescription::makeJobDescription(iter->key(), iter->val()); + if (!nebula::ok(optJobRet)) { continue; } - - auto jobDesc = optJob->toJobDesc(); + auto optJob = nebula::value(optJobRet); + auto jobDesc = optJob.toJobDesc(); if (jobDesc.get_cmd() == meta::cpp2::AdminCmd::REBUILD_EDGE_INDEX) { auto paras = jobDesc.get_paras(); DCHECK_GE(paras.size(), 1); auto spaceName = paras.back(); auto ret = getSpaceId(spaceName); - if (!ret.ok()) { + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + if (retCode == cpp2::ErrorCode::E_LEADER_CHANGED) { + handleErrorCode(retCode); + onFinished(); + return; + } continue; } - auto spaceId = ret.value(); + auto spaceId = nebula::value(ret); if (spaceId != curSpaceId) { continue; } diff --git a/src/meta/processors/jobMan/ListTagIndexStatusProcessor.cpp b/src/meta/processors/jobMan/ListTagIndexStatusProcessor.cpp index 51a86fdd5..dcaa3e05c 100644 --- a/src/meta/processors/jobMan/ListTagIndexStatusProcessor.cpp +++ b/src/meta/processors/jobMan/ListTagIndexStatusProcessor.cpp @@ -15,30 +15,39 @@ void ListTagIndexStatusProcessor::process(const cpp2::ListIndexStatusReq& req) { std::unique_ptr iter; auto rc = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter); if (rc != nebula::kvstore::ResultCode::SUCCEEDED) { - handleErrorCode(cpp2::ErrorCode::E_STORE_FAILURE); + auto retCode = MetaCommon::to(rc); + LOG(ERROR) << "Loading Job Failed" << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } std::vector jobs; - std::vector statuses; + std::vector statuses; for (; iter->valid(); iter->next()) { if (JobDescription::isJobKey(iter->key())) { - auto optJob = JobDescription::makeJobDescription(iter->key(), iter->val()); - if (optJob == folly::none) { + auto optJobRet = 
JobDescription::makeJobDescription(iter->key(), iter->val()); + if (!nebula::ok(optJobRet)) { continue; } - auto jobDesc = optJob->toJobDesc(); - if (jobDesc.get_cmd() == meta::cpp2::AdminCmd::REBUILD_TAG_INDEX) { + auto optJob = nebula::value(optJobRet); + auto jobDesc = optJob.toJobDesc(); + if (jobDesc.get_cmd() == cpp2::AdminCmd::REBUILD_TAG_INDEX) { auto paras = jobDesc.get_paras(); DCHECK_GE(paras.size(), 1); auto spaceName = paras.back(); auto ret = getSpaceId(spaceName); - if (!ret.ok()) { + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + if (retCode == cpp2::ErrorCode::E_LEADER_CHANGED) { + handleErrorCode(retCode); + onFinished(); + return; + } continue; } - auto spaceId = ret.value(); + auto spaceId = nebula::value(ret); if (spaceId != curSpaceId) { continue; } diff --git a/src/meta/processors/jobMan/MetaJobExecutor.cpp b/src/meta/processors/jobMan/MetaJobExecutor.cpp index c6d9f24ca..234aedc71 100644 --- a/src/meta/processors/jobMan/MetaJobExecutor.cpp +++ b/src/meta/processors/jobMan/MetaJobExecutor.cpp @@ -4,11 +4,10 @@ * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
*/ -#include -#include #include "common/network/NetworkUtils.h" #include "common/interface/gen-cpp2/common_types.h" #include "meta/ActiveHostsMan.h" +#include "meta/common/MetaCommon.h" #include "meta/MetaServiceUtils.h" #include "meta/processors/Common.h" #include "meta/processors/admin/AdminClient.h" @@ -74,8 +73,10 @@ MetaJobExecutor::getSpaceIdFromName(const std::string& spaceName) { std::string val; auto rc = kvstore_->get(kDefaultSpaceId, kDefaultPartId, indexKey, &val); if (rc != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get space ID failed space name: " << spaceName; - return cpp2::ErrorCode::E_NOT_FOUND; + auto retCode = MetaCommon::to(rc); + LOG(ERROR) << "Get space failed, space name: " << spaceName << " error: " + << static_cast(retCode); + return retCode; } return *reinterpret_cast(val.c_str()); } @@ -85,8 +86,10 @@ ErrOrHosts MetaJobExecutor::getTargetHost(GraphSpaceID spaceId) { auto partPrefix = MetaServiceUtils::partPrefix(spaceId); auto rc = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, partPrefix, &iter); if (rc != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Fetch Parts Failed"; - return cpp2::ErrorCode::E_NOT_FOUND; + auto retCode = MetaCommon::to(rc); + LOG(ERROR) << "Fetch Parts Failed, error: " + << static_cast(retCode); + return retCode; } // use vector instead of set because this can convient for next step @@ -110,8 +113,10 @@ ErrOrHosts MetaJobExecutor::getLeaderHost(GraphSpaceID space) { std::unique_ptr leaderIter; auto result = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, hostPrefix, &leaderIter); if (result != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get space " << space << "'s part failed"; - return cpp2::ErrorCode::E_NOT_FOUND; + auto retCode = MetaCommon::to(result); + LOG(ERROR) << "Get space " << space << "'s part failed, error: " + << static_cast(retCode); + return retCode; } std::vector>> hosts; @@ -122,7 +127,13 @@ ErrOrHosts MetaJobExecutor::getLeaderHost(GraphSpaceID space) { return 
cpp2::ErrorCode::E_INVALID_PARM; } - if (ActiveHostsMan::isLived(kvstore_, hostAddr)) { + auto livedHostRet = ActiveHostsMan::isLived(kvstore_, hostAddr); + if (!nebula::ok(livedHostRet)) { + LOG(ERROR) << "Get host failed"; + return nebula::error(livedHostRet); + } + + if (nebula::value(livedHostRet)) { auto leaderParts = MetaServiceUtils::parseLeaderVal(leaderIter->val()); auto parts = leaderParts[space]; hosts.emplace_back(std::make_pair(std::move(hostAddr), std::move(parts))); @@ -142,7 +153,7 @@ cpp2::ErrorCode MetaJobExecutor::execute() { if (!nebula::ok(addressesRet)) { LOG(ERROR) << "Can't get hosts"; - return cpp2::ErrorCode::E_NO_HOSTS; + return nebula::error(addressesRet); } std::vector parts; @@ -164,7 +175,7 @@ cpp2::ErrorCode MetaJobExecutor::execute() { baton.wait(); if (rc != nebula::kvstore::ResultCode::SUCCEEDED) { LOG(INFO) << "write to kv store failed. E_STORE_FAILURE"; - return cpp2::ErrorCode::E_STORE_FAILURE; + return MetaCommon::to(rc); } } diff --git a/src/meta/processors/jobMan/MetaJobExecutor.h b/src/meta/processors/jobMan/MetaJobExecutor.h index 6c865dddc..6417635dd 100644 --- a/src/meta/processors/jobMan/MetaJobExecutor.h +++ b/src/meta/processors/jobMan/MetaJobExecutor.h @@ -46,7 +46,9 @@ class MetaJobExecutor { // Stop the job when the user cancel it. 
virtual cpp2::ErrorCode stop() = 0; - virtual void finish(bool) {} + virtual cpp2::ErrorCode finish(bool) { + return cpp2::ErrorCode::SUCCEEDED; + } void setSpaceId(GraphSpaceID spaceId) { space_ = spaceId; } diff --git a/src/meta/processors/jobMan/RebuildJobExecutor.cpp b/src/meta/processors/jobMan/RebuildJobExecutor.cpp index dad07830c..a8c89c8ca 100644 --- a/src/meta/processors/jobMan/RebuildJobExecutor.cpp +++ b/src/meta/processors/jobMan/RebuildJobExecutor.cpp @@ -36,8 +36,10 @@ cpp2::ErrorCode RebuildJobExecutor::prepare() { auto indexKey = MetaServiceUtils::indexIndexKey(space_, paras_[i]); auto result = kvstore_->get(kDefaultSpaceId, kDefaultPartId, indexKey, &indexValue); if (result != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get indexKey error indexName: " << paras_[i]; - return cpp2::ErrorCode::E_NOT_FOUND; + auto retCode = MetaCommon::to(result); + LOG(ERROR) << "Get indexKey error indexName: " << paras_[i] << " error: " + << static_cast(retCode); + return retCode; } indexId = *reinterpret_cast(indexValue.c_str()); @@ -51,7 +53,7 @@ meta::cpp2::ErrorCode RebuildJobExecutor::stop() { auto errOrTargetHost = getTargetHost(space_); if (!nebula::ok(errOrTargetHost)) { LOG(ERROR) << "Get target host failed"; - return cpp2::ErrorCode::E_NO_HOSTS; + return nebula::error(errOrTargetHost); } auto& hosts = nebula::value(errOrTargetHost); diff --git a/src/meta/processors/jobMan/StatisJobExecutor.cpp b/src/meta/processors/jobMan/StatisJobExecutor.cpp index 9e569eb20..7e9a0b63a 100644 --- a/src/meta/processors/jobMan/StatisJobExecutor.cpp +++ b/src/meta/processors/jobMan/StatisJobExecutor.cpp @@ -4,6 +4,7 @@ * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
*/ +#include "meta/common/MetaCommon.h" #include "meta/MetaServiceUtils.h" #include "meta/processors/jobMan/StatisJobExecutor.h" #include "meta/processors/Common.h" @@ -17,7 +18,7 @@ bool StatisJobExecutor::check() { return paras_.size() == 1; } -kvstore::ResultCode +cpp2::ErrorCode StatisJobExecutor::save(const std::string& key, const std::string& val) { std::vector data{std::make_pair(key, val)}; folly::Baton baton; @@ -28,14 +29,19 @@ StatisJobExecutor::save(const std::string& key, const std::string& val) { baton.post(); }); baton.wait(); - return rc; + return MetaCommon::to(rc); } -void StatisJobExecutor::doRemove(const std::string& key) { +cpp2::ErrorCode StatisJobExecutor::doRemove(const std::string& key) { folly::Baton baton; - kvstore_->asyncRemove( - kDefaultSpaceId, kDefaultPartId, key, [&](nebula::kvstore::ResultCode) { baton.post(); }); + auto rc = nebula::kvstore::ResultCode::SUCCEEDED; + kvstore_->asyncRemove(kDefaultSpaceId, kDefaultPartId, key, + [&](nebula::kvstore::ResultCode code) { + rc = code; + baton.post(); + }); baton.wait(); + return MetaCommon::to(rc); } cpp2::ErrorCode StatisJobExecutor::prepare() { @@ -51,8 +57,7 @@ cpp2::ErrorCode StatisJobExecutor::prepare() { statisItem.set_status(cpp2::JobStatus::RUNNING); auto statisKey = MetaServiceUtils::statisKey(space_); auto statisVal = MetaServiceUtils::statisVal(statisItem); - save(statisKey, statisVal); - return cpp2::ErrorCode::SUCCEEDED; + return save(statisKey, statisVal); } folly::Future @@ -110,16 +115,22 @@ cpp2::ErrorCode StatisJobExecutor::saveSpecialTaskStatus(const cpp2::ReportTaskR auto tempKey = toTempKey(req.get_job_id()); std::string val; auto ret = kvstore_->get(kDefaultSpaceId, kDefaultPartId, tempKey, &val); - if (ret == kvstore::ResultCode::ERR_KEY_NOT_FOUND) { + + if (ret != kvstore::ResultCode::SUCCEEDED) { + if (ret != kvstore::ResultCode::ERR_KEY_NOT_FOUND) { + return MetaCommon::to(ret); + } ret = kvstore_->get(kDefaultSpaceId, kDefaultPartId, statisKey, &val); } - if 
(ret == kvstore::ResultCode::SUCCEEDED) { - statisItem = MetaServiceUtils::parseStatisVal(val); + + if (ret != kvstore::ResultCode::SUCCEEDED) { + return MetaCommon::to(ret); } + + statisItem = MetaServiceUtils::parseStatisVal(val); addStatis(statisItem, *req.statis_ref()); auto statisVal = MetaServiceUtils::statisVal(statisItem); - save(tempKey, statisVal); - return cpp2::ErrorCode::SUCCEEDED; + return save(tempKey, statisVal); } /** @@ -135,14 +146,14 @@ std::string StatisJobExecutor::toTempKey(int32_t jobId) { return key.append(reinterpret_cast(&jobId), sizeof(int32_t)); } -void StatisJobExecutor::finish(bool exeSuccessed) { +cpp2::ErrorCode StatisJobExecutor::finish(bool exeSuccessed) { auto statisKey = MetaServiceUtils::statisKey(space_); auto tempKey = toTempKey(jobId_); std::string val; auto ret = kvstore_->get(kDefaultSpaceId, kDefaultPartId, tempKey, &val); if (ret != kvstore::ResultCode::SUCCEEDED) { LOG(ERROR) << "Can't find the statis data, spaceId : " << space_; - return; + return MetaCommon::to(ret); } auto statisItem = MetaServiceUtils::parseStatisVal(val); if (exeSuccessed) { @@ -151,15 +162,19 @@ void StatisJobExecutor::finish(bool exeSuccessed) { statisItem.set_status(cpp2::JobStatus::FAILED); } auto statisVal = MetaServiceUtils::statisVal(statisItem); - save(statisKey, statisVal); - doRemove(tempKey); + auto retCode = save(statisKey, statisVal); + if (retCode != cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Save statis data failed, error " << static_cast(retCode); + return retCode; + } + return doRemove(tempKey); } cpp2::ErrorCode StatisJobExecutor::stop() { auto errOrTargetHost = getTargetHost(space_); if (!nebula::ok(errOrTargetHost)) { LOG(ERROR) << "Get target host failed"; - return cpp2::ErrorCode::E_NO_HOSTS; + return nebula::error(errOrTargetHost); } auto& hosts = nebula::value(errOrTargetHost); diff --git a/src/meta/processors/jobMan/StatisJobExecutor.h b/src/meta/processors/jobMan/StatisJobExecutor.h index 9e6d4e0cb..004cd1045 100644 
--- a/src/meta/processors/jobMan/StatisJobExecutor.h +++ b/src/meta/processors/jobMan/StatisJobExecutor.h @@ -34,7 +34,7 @@ class StatisJobExecutor : public MetaJobExecutor { executeInternal(HostAddr&& address, std::vector&& parts) override; // Summarize the results of statisItem_ - void finish(bool exeSuccessed) override; + cpp2::ErrorCode finish(bool exeSuccessed) override; cpp2::ErrorCode saveSpecialTaskStatus(const cpp2::ReportTaskReq& req) override; @@ -42,13 +42,13 @@ class StatisJobExecutor : public MetaJobExecutor { // Statis job writes an additional data. // The additional data is written when the statis job passes the check function. // Update this additional data when job finishes. - kvstore::ResultCode save(const std::string& key, const std::string& val); + cpp2::ErrorCode save(const std::string& key, const std::string& val); void addStatis(cpp2::StatisItem& lhs, const cpp2::StatisItem& rhs); std::string toTempKey(int32_t jobId); - void doRemove(const std::string& key); + cpp2::ErrorCode doRemove(const std::string& key); private: // Statis results diff --git a/src/meta/processors/listenerMan/ListenerProcessor.cpp b/src/meta/processors/listenerMan/ListenerProcessor.cpp index a022095a2..4ac78d897 100644 --- a/src/meta/processors/listenerMan/ListenerProcessor.cpp +++ b/src/meta/processors/listenerMan/ListenerProcessor.cpp @@ -19,24 +19,33 @@ void AddListenerProcessor::process(const cpp2::AddListenerReq& req) { auto type = req.get_type(); const auto& hosts = req.get_hosts(); auto ret = listenerExist(space, type); - if (ret != Status::ListenerNotFound()) { - handleErrorCode(cpp2::ErrorCode::E_EXISTED); + if (ret != cpp2::ErrorCode::E_NOT_FOUND) { + if (ret == cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Add listener failed, listener already exists."; + ret = cpp2::ErrorCode::E_EXISTED; + } else { + LOG(ERROR) << "Add listener failed, error: " << static_cast(ret); + } + handleErrorCode(ret); onFinished(); return; } + // TODO : (sky) if type is 
elasticsearch, need check text search service. folly::SharedMutex::WriteHolder wHolder(LockUtils::listenerLock()); folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - auto prefix = MetaServiceUtils::partPrefix(space); - std::unique_ptr iter; - auto kvRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (kvRet != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "List Parts Failed: No parts"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + const auto& prefix = MetaServiceUtils::partPrefix(space); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "List parts failed, error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + std::vector parts; + auto iter = nebula::value(iterRet).get(); while (iter->valid()) { parts.emplace_back(MetaServiceUtils::parsePartKeyPartId(iter->key())); iter->next(); @@ -46,7 +55,6 @@ void AddListenerProcessor::process(const cpp2::AddListenerReq& req) { data.emplace_back(MetaServiceUtils::listenerKey(space, parts[i], type), MetaServiceUtils::serializeHostAddr(hosts[i%hosts.size()])); } - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); doSyncPutAndUpdate(std::move(data)); } @@ -55,48 +63,64 @@ void RemoveListenerProcessor::process(const cpp2::RemoveListenerReq& req) { CHECK_SPACE_ID_AND_RETURN(space); auto type = req.get_type(); auto ret = listenerExist(space, type); - if (ret != Status::OK()) { - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (ret != cpp2::ErrorCode::SUCCEEDED) { + if (ret == cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Remove listener failed, listener not exists."; + } else { + LOG(ERROR) << "Remove listener failed, error: " << static_cast(ret); + } + handleErrorCode(ret); onFinished(); return; } + folly::SharedMutex::WriteHolder wHolder(LockUtils::listenerLock()); std::vector keys; - std::unique_ptr iter; - auto prefix = MetaServiceUtils::listenerPrefix(space, type); - auto 
listenerRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (listenerRet != kvstore::ResultCode::SUCCEEDED) { - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + const auto& prefix = MetaServiceUtils::listenerPrefix(space, type); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "Remove listener failed, error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + + auto iter = nebula::value(iterRet).get(); while (iter->valid()) { keys.emplace_back(iter->key()); iter->next(); } - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); - doSyncMultiRemoveAndUpdate({std::move(keys)}); + doSyncMultiRemoveAndUpdate(std::move(keys)); } void ListListenerProcessor::process(const cpp2::ListListenerReq& req) { auto space = req.get_space_id(); CHECK_SPACE_ID_AND_RETURN(space); folly::SharedMutex::ReadHolder rHolder(LockUtils::listenerLock()); - std::unique_ptr iter; - std::string prefix = MetaServiceUtils::listenerPrefix(space); - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Can't find any listener."; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + const auto& prefix = MetaServiceUtils::listenerPrefix(space); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "List listener failed, error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - auto activeHosts = ActiveHostsMan::getActiveHosts( + auto activeHostsRet = ActiveHostsMan::getActiveHosts( kvstore_, FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor, cpp2::HostRole::LISTENER); + if (!nebula::ok(activeHostsRet)) { + handleErrorCode(nebula::error(activeHostsRet)); + onFinished(); + return; + } + std::vector listeners; + auto activeHosts = std::move(nebula::value(activeHostsRet)); + auto iter = 
nebula::value(iterRet).get(); while (iter->valid()) { cpp2::ListenerInfo listener; listener.set_type(MetaServiceUtils::parseListenerType(iter->key())); @@ -115,5 +139,6 @@ void ListListenerProcessor::process(const cpp2::ListListenerReq& req) { handleErrorCode(cpp2::ErrorCode::SUCCEEDED); onFinished(); } + } // namespace meta } // namespace nebula diff --git a/src/meta/processors/partsMan/CreateSpaceProcessor.cpp b/src/meta/processors/partsMan/CreateSpaceProcessor.cpp index cb0990bea..67e576429 100644 --- a/src/meta/processors/partsMan/CreateSpaceProcessor.cpp +++ b/src/meta/processors/partsMan/CreateSpaceProcessor.cpp @@ -18,25 +18,31 @@ const std::string defaultGroup = "default"; // NOLINT void CreateSpaceProcessor::process(const cpp2::CreateSpaceReq& req) { folly::SharedMutex::WriteHolder wHolder(LockUtils::spaceLock()); auto properties = req.get_properties(); - auto spaceRet = getSpaceId(properties.get_space_name()); - if (spaceRet.ok()) { - cpp2::ErrorCode ret; - if (req.get_if_not_exists()) { - ret = cpp2::ErrorCode::SUCCEEDED; - } else { - LOG(ERROR) << "Create Space Failed : Space " << properties.get_space_name() + auto spaceName = properties.get_space_name(); + auto spaceRet = getSpaceId(spaceName); + + if (nebula::ok(spaceRet)) { + cpp2::ErrorCode ret = cpp2::ErrorCode::SUCCEEDED; + if (!req.get_if_not_exists()) { + LOG(ERROR) << "Create Space Failed : Space " << spaceName << " have existed!"; ret = cpp2::ErrorCode::E_EXISTED; } - - resp_.set_id(to(spaceRet.value(), EntryType::SPACE)); + resp_.set_id(to(nebula::value(spaceRet), EntryType::SPACE)); handleErrorCode(ret); onFinished(); return; + } else { + auto retCode = nebula::error(spaceRet); + if (retCode != cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Create Space Failed : Space " << spaceName + << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } } - CHECK_EQ(Status::SpaceNotFound(), spaceRet.status()); - auto spaceName = properties.get_space_name(); auto partitionNum = 
properties.get_partition_num(); auto replicaFactor = properties.get_replica_factor(); auto charsetName = properties.get_charset_name(); @@ -61,7 +67,7 @@ void CreateSpaceProcessor::process(const cpp2::CreateSpaceReq& req) { replicaFactor = FLAGS_default_replica_factor; if (replicaFactor <= 0) { LOG(ERROR) << "Create Space Failed : replicaFactor is illegal: " << replicaFactor; - resp_.set_code(cpp2::ErrorCode::E_INVALID_PARM); + handleErrorCode(cpp2::ErrorCode::E_INVALID_PARM); onFinished(); return; } @@ -110,43 +116,43 @@ void CreateSpaceProcessor::process(const cpp2::CreateSpaceReq& req) { LOG(INFO) << "Create Space on group: " << groupName; auto groupKey = MetaServiceUtils::groupKey(groupName); auto ret = doGet(groupKey); - if (!ret.ok()) { - LOG(ERROR) << "Group Name: " << groupName << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(ret)) { + LOG(ERROR) << " Get Group Name: " << groupName << " failed."; + handleErrorCode(nebula::error(ret)); onFinished(); return; } - auto zones = MetaServiceUtils::parseZoneNames(ret.value()); + auto zones = MetaServiceUtils::parseZoneNames(nebula::value(ret)); int32_t zoneNum = zones.size(); if (replicaFactor > zoneNum) { - LOG(ERROR) << "Replication number should less than or equal to zone number"; + LOG(ERROR) << "Replication number should less than or equal to zone number."; handleErrorCode(cpp2::ErrorCode::E_INVALID_PARM); onFinished(); return; } auto hostLoadingRet = getHostLoading(); - if (!hostLoadingRet.ok()) { - LOG(ERROR) << "Get host loading failed"; - handleErrorCode(cpp2::ErrorCode::E_INVALID_PARM); + if (!nebula::ok(hostLoadingRet)) { + LOG(ERROR) << "Get host loading failed."; + handleErrorCode(nebula::error(hostLoadingRet)); onFinished(); return; } - hostLoading_ = std::move(hostLoadingRet).value(); + hostLoading_ = std::move(nebula::value(hostLoadingRet)); std::unordered_map zoneHosts; for (auto& zone : zones) { auto zoneKey = MetaServiceUtils::zoneKey(zone); auto zoneValueRet = 
doGet(std::move(zoneKey)); - if (!zoneValueRet.ok()) { - LOG(ERROR) << "Get zone " << zone << " failed"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(zoneValueRet)) { + LOG(ERROR) << "Get zone " << zone << " failed."; + handleErrorCode(nebula::error(zoneValueRet)); onFinished(); return; } - auto hosts = MetaServiceUtils::parseZoneHosts(std::move(zoneValueRet).value()); + auto hosts = MetaServiceUtils::parseZoneHosts(std::move(nebula::value(zoneValueRet))); for (auto& host : hosts) { auto hostIter = hostLoading_.find(host); if (hostIter == hostLoading_.end()) { @@ -162,7 +168,7 @@ void CreateSpaceProcessor::process(const cpp2::CreateSpaceReq& req) { for (auto partId = 1; partId <= partitionNum; partId++) { auto pickedZonesRet = pickLightLoadZones(replicaFactor); if (!pickedZonesRet.ok()) { - LOG(ERROR) << "Pick zone failed"; + LOG(ERROR) << "Pick zone failed."; handleErrorCode(cpp2::ErrorCode::E_INVALID_PARM); onFinished(); return; @@ -171,7 +177,7 @@ void CreateSpaceProcessor::process(const cpp2::CreateSpaceReq& req) { auto pickedZones = std::move(pickedZonesRet).value(); auto partHostsRet = pickHostsWithZone(pickedZones, zoneHosts); if (!partHostsRet.ok()) { - LOG(ERROR) << "Pick hosts with zone failed"; + LOG(ERROR) << "Pick hosts with zone failed."; handleErrorCode(cpp2::ErrorCode::E_INVALID_PARM); onFinished(); return; @@ -189,7 +195,16 @@ void CreateSpaceProcessor::process(const cpp2::CreateSpaceReq& req) { MetaServiceUtils::partVal(partHosts)); } } else { - auto hosts = ActiveHostsMan::getActiveHosts(kvstore_); + auto hostsRet = ActiveHostsMan::getActiveHosts(kvstore_); + if (!nebula::ok(hostsRet)) { + auto retCode = nebula::error(hostsRet); + LOG(ERROR) << "Create Space Failed when get active host, error " + << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } + auto hosts = std::move(nebula::value(hostsRet)); if (hosts.empty()) { LOG(ERROR) << "Create Space Failed : No Hosts!"; 
handleErrorCode(cpp2::ErrorCode::E_NO_HOSTS); @@ -212,7 +227,6 @@ void CreateSpaceProcessor::process(const cpp2::CreateSpaceReq& req) { } } - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); resp_.set_id(to(spaceId, EntryType::SPACE)); doSyncPutAndUpdate(std::move(data)); LOG(INFO) << "Create space " << spaceName; @@ -231,17 +245,18 @@ CreateSpaceProcessor::pickHosts(PartitionID partId, return pickedHosts; } -StatusOr> +ErrorOr> CreateSpaceProcessor::getHostLoading() { - std::unique_ptr iter; const auto& prefix = MetaServiceUtils::partPrefix(); - auto code = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (code != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "List Parts Failed"; - return Status::Error("List Parts Failed"); + auto iterRet = doPrefix(prefix); + + if (!nebula::ok(iterRet)) { + LOG(ERROR) << "Prefix Parts Failed"; + return nebula::error(iterRet); } std::unordered_map result; + auto iter = nebula::value(iterRet).get(); while (iter->valid()) { auto hosts = MetaServiceUtils::parsePartVal(iter->val()); for (auto& host : hosts) { diff --git a/src/meta/processors/partsMan/CreateSpaceProcessor.h b/src/meta/processors/partsMan/CreateSpaceProcessor.h index 3c745ef1c..3a65e975e 100644 --- a/src/meta/processors/partsMan/CreateSpaceProcessor.h +++ b/src/meta/processors/partsMan/CreateSpaceProcessor.h @@ -36,7 +36,7 @@ class CreateSpaceProcessor : public BaseProcessor { const std::unordered_map& zoneHosts); // Get all host's part loading - StatusOr> getHostLoading(); + ErrorOr> getHostLoading(); // Get the zones with the least load StatusOr> diff --git a/src/meta/processors/partsMan/DropSpaceProcessor.cpp b/src/meta/processors/partsMan/DropSpaceProcessor.cpp index 97a2e4896..4c3e2f201 100644 --- a/src/meta/processors/partsMan/DropSpaceProcessor.cpp +++ b/src/meta/processors/partsMan/DropSpaceProcessor.cpp @@ -12,47 +12,64 @@ namespace meta { void DropSpaceProcessor::process(const cpp2::DropSpaceReq& req) { folly::SharedMutex::ReadHolder 
rHolder(LockUtils::snapshotLock()); folly::SharedMutex::WriteHolder wHolder(LockUtils::spaceLock()); - auto spaceRet = getSpaceId(req.get_space_name()); + const auto& spaceName = req.get_space_name(); + auto spaceRet = getSpaceId(spaceName); - if (!spaceRet.ok()) { - handleErrorCode(req.get_if_exists() ? cpp2::ErrorCode::SUCCEEDED : - MetaCommon::to(spaceRet.status())); + if (!nebula::ok(spaceRet)) { + auto retCode = nebula::error(spaceRet); + if (retCode == cpp2::ErrorCode::E_NOT_FOUND) { + if (req.get_if_exists()) { + retCode = cpp2::ErrorCode::SUCCEEDED; + } else { + LOG(ERROR) << "Drop space Failed, space " << spaceName << " not existed."; + } + } else { + LOG(ERROR) << "Drop space Failed, space " << spaceName + << " error: " << static_cast(retCode); + } + handleErrorCode(retCode); onFinished(); return; } - auto spaceId = spaceRet.value(); - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); + auto spaceId = nebula::value(spaceRet); std::vector deleteKeys; + // delete related part meta data. auto prefix = MetaServiceUtils::partPrefix(spaceId); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - handleErrorCode(MetaCommon::to(ret)); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "Drop space Failed, space " << spaceName + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto iter = nebula::value(iterRet).get(); while (iter->valid()) { deleteKeys.emplace_back(iter->key()); iter->next(); } - deleteKeys.emplace_back(MetaServiceUtils::indexSpaceKey(req.get_space_name())); + deleteKeys.emplace_back(MetaServiceUtils::indexSpaceKey(spaceName)); deleteKeys.emplace_back(MetaServiceUtils::spaceKey(spaceId)); // delete related role data. 
auto rolePrefix = MetaServiceUtils::roleSpacePrefix(spaceId); - std::unique_ptr roleIter; - auto roleRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, rolePrefix, &roleIter); - if (roleRet != kvstore::ResultCode::SUCCEEDED) { - handleErrorCode(MetaCommon::to(roleRet)); + auto roleRet = doPrefix(rolePrefix); + if (!nebula::ok(roleRet)) { + auto retCode = nebula::error(roleRet); + LOG(ERROR) << "Drop space Failed, space " << spaceName + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + + auto roleIter = nebula::value(roleRet).get(); while (roleIter->valid()) { - auto user = MetaServiceUtils::parseRoleUser(roleIter->key()); VLOG(3) << "Revoke role " << MetaServiceUtils::parseRoleStr(roleIter->val()) << " for user " @@ -63,13 +80,17 @@ void DropSpaceProcessor::process(const cpp2::DropSpaceReq& req) { // delete listener meta data auto lstPrefix = MetaServiceUtils::listenerPrefix(spaceId); - std::unique_ptr lstIter; - auto listenerRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, lstPrefix, &lstIter); - if (listenerRet != kvstore::ResultCode::SUCCEEDED) { - handleErrorCode(MetaCommon::to(listenerRet)); + auto lstRet = doPrefix(lstPrefix); + if (!nebula::ok(lstRet)) { + auto retCode = nebula::error(lstRet); + LOG(ERROR) << "Drop space Failed, space " << spaceName + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + + auto lstIter = nebula::value(lstRet).get(); while (lstIter->valid()) { deleteKeys.emplace_back(lstIter->key()); lstIter->next(); @@ -80,7 +101,7 @@ void DropSpaceProcessor::process(const cpp2::DropSpaceReq& req) { deleteKeys.emplace_back(statiskey); doSyncMultiRemoveAndUpdate(std::move(deleteKeys)); - LOG(INFO) << "Drop space " << req.get_space_name() << ", id " << spaceId; + LOG(INFO) << "Drop space " << spaceName << ", id " << spaceId; } } // namespace meta diff --git a/src/meta/processors/partsMan/GetPartsAllocProcessor.cpp 
b/src/meta/processors/partsMan/GetPartsAllocProcessor.cpp index da757e4d4..f270370ee 100644 --- a/src/meta/processors/partsMan/GetPartsAllocProcessor.cpp +++ b/src/meta/processors/partsMan/GetPartsAllocProcessor.cpp @@ -13,15 +13,17 @@ void GetPartsAllocProcessor::process(const cpp2::GetPartsAllocReq& req) { folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); auto spaceId = req.get_space_id(); auto prefix = MetaServiceUtils::partPrefix(spaceId); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get parts failed"; - handleErrorCode(MetaCommon::to(ret)); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "Get parts failed, error " << static_cast(retCode);; + handleErrorCode(retCode); onFinished(); return; } - std::unordered_map> parts; + + auto iter = nebula::value(iterRet).get(); + std::unordered_map> parts; while (iter->valid()) { auto key = iter->key(); PartitionID partId; diff --git a/src/meta/processors/partsMan/GetSpaceProcessor.cpp b/src/meta/processors/partsMan/GetSpaceProcessor.cpp index feae252df..02925a3ef 100644 --- a/src/meta/processors/partsMan/GetSpaceProcessor.cpp +++ b/src/meta/processors/partsMan/GetSpaceProcessor.cpp @@ -11,28 +11,36 @@ namespace meta { void GetSpaceProcessor::process(const cpp2::GetSpaceReq& req) { folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - auto spaceRet = getSpaceId(req.get_space_name()); - if (!spaceRet.ok()) { - handleErrorCode(MetaCommon::to(spaceRet.status())); + const auto& spaceName = req.get_space_name(); + + auto spaceRet = getSpaceId(spaceName); + if (!nebula::ok(spaceRet)) { + auto retCode = nebula::error(spaceRet); + LOG(ERROR) << "Get space Failed, SpaceName " << spaceName + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - auto spaceId = spaceRet.value(); 
+ auto spaceId = nebula::value(spaceRet); std::string spaceKey = MetaServiceUtils::spaceKey(spaceId); auto ret = doGet(spaceKey); - if (!ret.ok()) { - LOG(ERROR) << "Get Space SpaceName: " << req.get_space_name() << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "Get Space SpaceName: " << spaceName + << " error: " << static_cast(retCode);; + handleErrorCode(retCode); onFinished(); return; } - auto properties = MetaServiceUtils::parseSpace(ret.value()); - VLOG(3) << "Get Space SpaceName: " << req.get_space_name() + + auto properties = MetaServiceUtils::parseSpace(nebula::value(ret)); + VLOG(3) << "Get Space SpaceName: " << spaceName << ", Partition Num " << properties.get_partition_num() << ", Replica Factor " << properties.get_replica_factor(); if (properties.group_name_ref().has_value()) { - LOG(INFO) << "Space " << req.get_space_name() + LOG(INFO) << "Space " << spaceName << " is bind to the group " << *properties.group_name_ref(); } @@ -47,4 +55,3 @@ void GetSpaceProcessor::process(const cpp2::GetSpaceReq& req) { } // namespace meta } // namespace nebula - diff --git a/src/meta/processors/partsMan/ListHostsProcessor.cpp b/src/meta/processors/partsMan/ListHostsProcessor.cpp index 8c6267ea1..6e3a58bfd 100644 --- a/src/meta/processors/partsMan/ListHostsProcessor.cpp +++ b/src/meta/processors/partsMan/ListHostsProcessor.cpp @@ -31,28 +31,28 @@ static cpp2::HostRole toHostRole(cpp2::ListHostType type) { } void ListHostsProcessor::process(const cpp2::ListHostsReq& req) { - Status status; + cpp2::ErrorCode retCode; { folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - auto spaceRet = getSpaceIdNameMap(); - if (!spaceRet.ok()) { + retCode = getSpaceIdNameMap(); + if (retCode != cpp2::ErrorCode::SUCCEEDED) { + handleErrorCode(retCode); onFinished(); return; } meta::cpp2::ListHostType type = req.get_type(); if (type == cpp2::ListHostType::ALLOC) { - status = 
fillLeaderAndPartInfoPerHost(); + retCode = fillLeaderAndPartInfoPerHost(); } else { auto hostRole = toHostRole(type); - status = allHostsWithStatus(hostRole); + retCode = allHostsWithStatus(hostRole); } } - if (status.ok()) { - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); + if (retCode == cpp2::ErrorCode::SUCCEEDED) { resp_.set_hosts(std::move(hostItems_)); } - + handleErrorCode(retCode); onFinished(); } @@ -63,10 +63,12 @@ void ListHostsProcessor::process(const cpp2::ListHostsReq& req) { * which return a bunch of host infomation * it's not necessary add this interface only for gitInfoSHA * */ -Status ListHostsProcessor::allMetaHostsStatus() { +cpp2::ErrorCode ListHostsProcessor::allMetaHostsStatus() { auto errOrPart = kvstore_->part(kDefaultSpaceId, kDefaultPartId); if (!nebula::ok(errOrPart)) { - return Status::SpaceNotFound(); + auto retCode = MetaCommon::to(nebula::error(errOrPart)); + LOG(ERROR) << "List Hosts Failed, error: " << static_cast(retCode); + return retCode; } auto metaPeers = nebula::value(errOrPart)->peers(); // transform raft port to servre port @@ -81,33 +83,35 @@ Status ListHostsProcessor::allMetaHostsStatus() { item.set_status(cpp2::HostStatus::ONLINE); hostItems_.emplace_back(item); } - return Status::OK(); + return cpp2::ErrorCode::SUCCEEDED; } -Status ListHostsProcessor::allHostsWithStatus(cpp2::HostRole role) { +cpp2::ErrorCode ListHostsProcessor::allHostsWithStatus(cpp2::HostRole role) { if (role == cpp2::HostRole::META) { return allMetaHostsStatus(); } const auto& hostPrefix = MetaServiceUtils::hostPrefix(); - std::unique_ptr iter; - auto kvRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, hostPrefix, &iter); - if (kvRet != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "List Hosts Failed: No hosts"; - handleErrorCode(cpp2::ErrorCode::E_NO_HOSTS); - return Status::Error("Can't access kvstore, ret = %d", static_cast(kvRet)); + auto ret = doPrefix(hostPrefix); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + 
LOG(ERROR) << "List Hosts Failed, error: " << static_cast(retCode); + return retCode; } + auto iter = nebula::value(ret).get(); auto now = time::WallClock::fastNowInMilliSec(); std::vector removeHostsKey; while (iter->valid()) { - cpp2::HostItem item; - auto host = MetaServiceUtils::parseHostKey(iter->key()); - item.set_hostAddr(std::move(host)); HostInfo info = HostInfo::decode(iter->val()); if (info.role_ != role) { iter->next(); continue; } + + cpp2::HostItem item; + auto host = MetaServiceUtils::parseHostKey(iter->key()); + item.set_hostAddr(std::move(host)); + item.set_role(info.role_); item.set_git_info_sha(info.gitInfoSha_); if (now - info.lastHBTimeInMilliSec_ < FLAGS_removed_threshold_sec * 1000) { @@ -127,27 +131,31 @@ Status ListHostsProcessor::allHostsWithStatus(cpp2::HostRole role) { } removeExpiredHosts(std::move(removeHostsKey)); - return Status::OK(); + return cpp2::ErrorCode::SUCCEEDED; } -Status ListHostsProcessor::fillLeaderAndPartInfoPerHost() { - auto status = allHostsWithStatus(cpp2::HostRole::STORAGE); - if (!status.ok()) { +cpp2::ErrorCode ListHostsProcessor::fillLeaderAndPartInfoPerHost() { + auto retCode = allHostsWithStatus(cpp2::HostRole::STORAGE); + if (retCode != cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Get all host's status failed"; - return status; + return retCode; } - std::unique_ptr iter; const auto& leaderPrefix = MetaServiceUtils::leaderPrefix(); - auto kvRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, leaderPrefix, &iter); - if (kvRet != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "List Hosts Failed: No leaders"; - handleErrorCode(cpp2::ErrorCode::E_NO_HOSTS); - return Status::Error("Can't access kvstore, ret = %d", static_cast(kvRet)); + auto iterRet = doPrefix(leaderPrefix); + if (!nebula::ok(iterRet)) { + retCode = nebula::error(iterRet); + LOG(ERROR) << "List leader Hosts Failed, error: " << static_cast(retCode); + return retCode; } // get hosts which have send heartbeat recently - auto activeHosts = 
ActiveHostsMan::getActiveHosts(kvstore_); + auto activeHostsRet = ActiveHostsMan::getActiveHosts(kvstore_); + if (!nebula::ok(activeHostsRet)) { + return nebula::error(activeHostsRet); + } + auto activeHosts = nebula::value(activeHostsRet); + auto iter = nebula::value(iterRet).get(); while (iter->valid()) { auto host = MetaServiceUtils::parseLeaderKey(iter->key()); if (std::find(activeHosts.begin(), activeHosts.end(), host) != activeHosts.end()) { @@ -161,6 +169,7 @@ Status ListHostsProcessor::fillLeaderAndPartInfoPerHost() { } iter->next(); } + std::unordered_map>> allParts; for (const auto& spaceId : spaceIds_) { @@ -169,19 +178,22 @@ Status ListHostsProcessor::fillLeaderAndPartInfoPerHost() { std::unordered_map> hostParts; const auto& partPrefix = MetaServiceUtils::partPrefix(spaceId); - kvRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, partPrefix, &iter); - if (kvRet != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "List Hosts Failed: No partitions"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); - return Status::Error("Can't find any partitions"); + auto iterPartRet = doPrefix(partPrefix); + if (!nebula::ok(iterPartRet)) { + retCode = nebula::error(iterPartRet); + LOG(ERROR) << "List part failed in list hosts, error: " + << static_cast(retCode); + return retCode; } - while (iter->valid()) { - PartitionID partId = MetaServiceUtils::parsePartKeyPartId(iter->key()); - auto partHosts = MetaServiceUtils::parsePartVal(iter->val()); + + auto partIter = nebula::value(iterPartRet).get(); + while (partIter->valid()) { + PartitionID partId = MetaServiceUtils::parsePartKeyPartId(partIter->key()); + auto partHosts = MetaServiceUtils::parsePartVal(partIter->val()); for (auto& host : partHosts) { hostParts[host].emplace_back(partId); } - iter->next(); + partIter->next(); } for (const auto& hostEntry : hostParts) { @@ -199,7 +211,7 @@ Status ListHostsProcessor::fillLeaderAndPartInfoPerHost() { } } - return Status::OK(); + return cpp2::ErrorCode::SUCCEEDED; } // 
Remove hosts that long time at OFFLINE status @@ -217,23 +229,24 @@ void ListHostsProcessor::removeExpiredHosts(std::vector&& removeHos }); } -Status ListHostsProcessor::getSpaceIdNameMap() { +cpp2::ErrorCode ListHostsProcessor::getSpaceIdNameMap() { // Get all spaces const auto& spacePrefix = MetaServiceUtils::spacePrefix(); - std::unique_ptr iter; - auto kvRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, spacePrefix, &iter); - if (kvRet != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "List Hosts Failed: No space found"; - handleErrorCode(cpp2::ErrorCode::E_NO_HOSTS); - return Status::Error("Can't access kvstore, ret = %d", static_cast(kvRet)); + auto iterRet = doPrefix(spacePrefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "List Hosts Failed, error " << static_cast(retCode); + return retCode; } + + auto iter = nebula::value(iterRet).get(); while (iter->valid()) { auto spaceId = MetaServiceUtils::spaceId(iter->key()); spaceIds_.emplace_back(spaceId); spaceIdNameMap_.emplace(spaceId, MetaServiceUtils::spaceName(iter->val())); iter->next(); } - return Status::OK(); + return cpp2::ErrorCode::SUCCEEDED; } std::unordered_map> diff --git a/src/meta/processors/partsMan/ListHostsProcessor.h b/src/meta/processors/partsMan/ListHostsProcessor.h index 159ff5704..b3b24f170 100644 --- a/src/meta/processors/partsMan/ListHostsProcessor.h +++ b/src/meta/processors/partsMan/ListHostsProcessor.h @@ -27,28 +27,29 @@ class ListHostsProcessor : public BaseProcessor { /** * return online/offline, gitInfoSHA for the specific HostRole * */ - Status allHostsWithStatus(cpp2::HostRole type); + cpp2::ErrorCode allHostsWithStatus(cpp2::HostRole type); // the show leader, partition info (nebula 1.0 edition) - Status fillLeaderAndPartInfoPerHost(); + cpp2::ErrorCode fillLeaderAndPartInfoPerHost(); /** * Get gitInfoSHA from all meta hosts gitInfoSHA * now, assume of of them are equal * */ - Status allMetaHostsStatus(); + cpp2::ErrorCode 
allMetaHostsStatus(); // Get map of spaceId -> spaceName - Status getSpaceIdNameMap(); + cpp2::ErrorCode getSpaceIdNameMap(); std::unordered_map> getLeaderPartsWithSpaceName(const LeaderParts& leaderParts); void removeExpiredHosts(std::vector&& removeHostsKey); - std::vector spaceIds_; +private: + std::vector spaceIds_; std::unordered_map spaceIdNameMap_; - std::vector hostItems_; + std::vector hostItems_; }; } // namespace meta diff --git a/src/meta/processors/partsMan/ListPartsProcessor.cpp b/src/meta/processors/partsMan/ListPartsProcessor.cpp index 9ce927478..7642b923f 100644 --- a/src/meta/processors/partsMan/ListPartsProcessor.cpp +++ b/src/meta/processors/partsMan/ListPartsProcessor.cpp @@ -25,23 +25,38 @@ void ListPartsProcessor::process(const cpp2::ListPartsReq& req) { for (const auto& partId : partIds_) { auto partKey = MetaServiceUtils::partKey(spaceId_, partId); std::string value; - auto retCode = kvstore_->get(kDefaultSpaceId, kDefaultPartId, partKey, &value); - if (retCode == kvstore::ResultCode::SUCCEEDED) { - partHostsMap[partId] = MetaServiceUtils::parsePartVal(value); + auto ret = doGet(std::move(partKey)); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "Get part failed, error " << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; } + auto hosts = std::move(nebula::value(ret)); + partHostsMap[partId] = MetaServiceUtils::parsePartVal(hosts); } } else { // Show all parts folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - auto status = getAllParts(); - if (!status.ok()) { + auto ret = getAllParts(); + if (!nebula::ok(ret)) { + handleErrorCode(nebula::error(ret)); onFinished(); return; } - partHostsMap = std::move(status).value(); + partHostsMap = std::move(nebula::value(ret)); } + std::vector partItems; - auto activeHosts = ActiveHostsMan::getActiveHosts(kvstore_); + auto activeHostsRet = ActiveHostsMan::getActiveHosts(kvstore_); + if (!nebula::ok(activeHostsRet)) { + 
handleErrorCode(nebula::error(activeHostsRet)); + onFinished(); + return; + } + auto activeHosts = std::move(nebula::value(activeHostsRet)); + for (auto& partEntry : partHostsMap) { cpp2::PartItem partItem; partItem.set_part_id(partEntry.first); @@ -60,27 +75,30 @@ void ListPartsProcessor::process(const cpp2::ListPartsReq& req) { if (partItems.size() != partHostsMap.size()) { LOG(ERROR) << "Maybe lost some partitions!"; } - getLeaderDist(partItems); - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); - resp_.set_parts(std::move(partItems)); + auto retCode = getLeaderDist(partItems); + if (retCode == cpp2::ErrorCode::SUCCEEDED) { + resp_.set_parts(std::move(partItems)); + } + handleErrorCode(retCode); onFinished(); } -StatusOr>> +ErrorOr>> ListPartsProcessor::getAllParts() { std::unordered_map> partHostsMap; folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - auto prefix = MetaServiceUtils::partPrefix(spaceId_); - std::unique_ptr iter; - auto kvRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (kvRet != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "List Parts Failed: No parts"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); - return Status::Error("Can't access kvstore, ret = %d", static_cast(kvRet)); + const auto& prefix = MetaServiceUtils::partPrefix(spaceId_); + auto ret = doPrefix(prefix); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "List Parts Failed, error: " + << static_cast(retCode); + return retCode; } + auto iter = nebula::value(ret).get(); while (iter->valid()) { auto key = iter->key(); PartitionID partId; @@ -94,16 +112,24 @@ ListPartsProcessor::getAllParts() { } -void ListPartsProcessor::getLeaderDist(std::vector& partItems) { - const auto& hostPrefix = MetaServiceUtils::leaderPrefix(); - std::unique_ptr iter; - auto kvRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, hostPrefix, &iter); - if (kvRet != kvstore::ResultCode::SUCCEEDED) { - return; +cpp2::ErrorCode 
ListPartsProcessor::getLeaderDist(std::vector& partItems) { + const auto& leaderPrefix = MetaServiceUtils::leaderPrefix(); + auto ret = doPrefix(leaderPrefix); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "Get leader host failed, error " + << static_cast(retCode); + return retCode; } // get hosts which have send heartbeat recently - auto activeHosts = ActiveHostsMan::getActiveHosts(kvstore_); + auto activeHostsRet = ActiveHostsMan::getActiveHosts(kvstore_); + if (!nebula::ok(activeHostsRet)) { + return nebula::error(activeHostsRet); + } + + auto activeHosts = std::move(nebula::value(activeHostsRet)); + auto iter = nebula::value(ret).get(); while (iter->valid()) { auto host = MetaServiceUtils::parseLeaderKey(iter->key()); if (std::find(activeHosts.begin(), activeHosts.end(), host) != activeHosts.end()) { @@ -118,6 +144,8 @@ void ListPartsProcessor::getLeaderDist(std::vector& partItems) { } iter->next(); } + + return cpp2::ErrorCode::SUCCEEDED; } } // namespace meta diff --git a/src/meta/processors/partsMan/ListPartsProcessor.h b/src/meta/processors/partsMan/ListPartsProcessor.h index 38d4cf4eb..54c55b3f7 100644 --- a/src/meta/processors/partsMan/ListPartsProcessor.h +++ b/src/meta/processors/partsMan/ListPartsProcessor.h @@ -27,10 +27,11 @@ class ListPartsProcessor : public BaseProcessor { // Get parts alloc information - StatusOr>> getAllParts(); + ErrorOr>> + getAllParts(); // Get all parts with storage leader distribution - void getLeaderDist(std::vector& partItems); + cpp2::ErrorCode getLeaderDist(std::vector& partItems); private: GraphSpaceID spaceId_; diff --git a/src/meta/processors/partsMan/ListSpacesProcessor.cpp b/src/meta/processors/partsMan/ListSpacesProcessor.cpp index 90bddf521..d185a7975 100644 --- a/src/meta/processors/partsMan/ListSpacesProcessor.cpp +++ b/src/meta/processors/partsMan/ListSpacesProcessor.cpp @@ -11,15 +11,18 @@ namespace meta { void ListSpacesProcessor::process(const cpp2::ListSpacesReq&) { 
folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - auto prefix = MetaServiceUtils::spacePrefix(); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "List spaces failed"; - handleErrorCode(MetaCommon::to(ret)); + const auto& prefix = MetaServiceUtils::spacePrefix(); + auto ret = doPrefix(prefix); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "List spaces failed, error " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto iter = nebula::value(ret).get(); + std::vector spaces; while (iter->valid()) { auto spaceId = MetaServiceUtils::spaceId(iter->key()); diff --git a/src/meta/processors/schemaMan/AlterEdgeProcessor.cpp b/src/meta/processors/schemaMan/AlterEdgeProcessor.cpp index 61c5678ac..7c1bc9133 100644 --- a/src/meta/processors/schemaMan/AlterEdgeProcessor.cpp +++ b/src/meta/processors/schemaMan/AlterEdgeProcessor.cpp @@ -11,26 +11,39 @@ namespace nebula { namespace meta { void AlterEdgeProcessor::process(const cpp2::AlterEdgeReq& req) { - CHECK_SPACE_ID_AND_RETURN(req.get_space_id()); GraphSpaceID spaceId = req.get_space_id(); + CHECK_SPACE_ID_AND_RETURN(spaceId); + auto edgeName = req.get_edge_name(); + folly::SharedMutex::ReadHolder rHolder(LockUtils::snapshotLock()); folly::SharedMutex::WriteHolder wHolder(LockUtils::edgeLock()); - auto ret = getEdgeType(spaceId, req.get_edge_name()); - if (!ret.ok()) { - handleErrorCode(MetaCommon::to(ret.status())); + auto ret = getEdgeType(spaceId, edgeName); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "Failed to get edge " << edgeName << " error " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - auto edgeType = ret.value(); + auto edgeType = nebula::value(ret); // Check the edge belongs to the space - std::unique_ptr iter; auto edgePrefix = 
MetaServiceUtils::schemaEdgePrefix(spaceId, edgeType); - auto code = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, edgePrefix, &iter); - if (code != kvstore::ResultCode::SUCCEEDED || !iter->valid()) { - LOG(ERROR) << "Edge could not be found " << req.get_edge_name() - << ", spaceId " << spaceId - << ", edgeType " << edgeType; + auto retPre = doPrefix(edgePrefix); + if (!nebula::ok(retPre)) { + auto retCode = nebula::error(retPre); + LOG(ERROR) << "Edge Prefix failed, edgename: " << edgeName + << ", spaceId " << spaceId << " error " + << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } + auto iter = nebula::value(retPre).get(); + if (!iter->valid()) { + LOG(ERROR) << "Edge could not be found, spaceId " << spaceId + << ", edgename: " << edgeName; handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); onFinished(); return; @@ -46,12 +59,13 @@ void AlterEdgeProcessor::process(const cpp2::AlterEdgeReq& req) { auto& edgeItems = req.get_edge_items(); auto iRet = getIndexes(spaceId, edgeType); - if (!iRet.ok()) { - handleErrorCode(MetaCommon::to(iRet.status())); + if (!nebula::ok(iRet)) { + handleErrorCode(nebula::error(iRet)); onFinished(); return; } - auto indexes = std::move(iRet).value(); + + auto indexes = std::move(nebula::value(iRet)); auto existIndex = !indexes.empty(); if (existIndex) { auto iStatus = indexCheck(indexes, edgeItems); @@ -97,11 +111,10 @@ void AlterEdgeProcessor::process(const cpp2::AlterEdgeReq& req) { schema.set_columns(std::move(columns)); std::vector data; - LOG(INFO) << "Alter edge " << req.get_edge_name() << ", edgeType " << edgeType; + LOG(INFO) << "Alter edge " << edgeName << ", edgeType " << edgeType; data.emplace_back(MetaServiceUtils::schemaEdgeKey(spaceId, edgeType, version), - MetaServiceUtils::schemaVal(req.get_edge_name(), schema)); + MetaServiceUtils::schemaVal(edgeName, schema)); resp_.set_id(to(edgeType, EntryType::EDGE)); - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); doSyncPutAndUpdate(std::move(data)); 
} diff --git a/src/meta/processors/schemaMan/AlterTagProcessor.cpp b/src/meta/processors/schemaMan/AlterTagProcessor.cpp index 9adfe2b65..3b52486a3 100644 --- a/src/meta/processors/schemaMan/AlterTagProcessor.cpp +++ b/src/meta/processors/schemaMan/AlterTagProcessor.cpp @@ -11,26 +11,39 @@ namespace nebula { namespace meta { void AlterTagProcessor::process(const cpp2::AlterTagReq& req) { - CHECK_SPACE_ID_AND_RETURN(req.get_space_id()); GraphSpaceID spaceId = req.get_space_id(); + CHECK_SPACE_ID_AND_RETURN(spaceId); + auto tagName = req.get_tag_name(); + folly::SharedMutex::ReadHolder rHolder(LockUtils::snapshotLock()); folly::SharedMutex::WriteHolder wHolder(LockUtils::tagLock()); - auto ret = getTagId(spaceId, req.get_tag_name()); - if (!ret.ok()) { - handleErrorCode(MetaCommon::to(ret.status())); + auto ret = getTagId(spaceId, tagName); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "Failed to get tag " << tagName << " error " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - auto tagId = ret.value(); + auto tagId = nebula::value(ret); // Check the tag belongs to the space - std::unique_ptr iter; auto tagPrefix = MetaServiceUtils::schemaTagPrefix(spaceId, tagId); - auto code = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, tagPrefix, &iter); - if (code != kvstore::ResultCode::SUCCEEDED || !iter->valid()) { - LOG(ERROR) << "Tag could not be found " << req.get_tag_name() - << ", spaceId " << spaceId - << ", tagId " << tagId; + auto retPre = doPrefix(tagPrefix); + if (!nebula::ok(retPre)) { + auto retCode = nebula::error(retPre); + LOG(ERROR) << "Tag Prefix failed, tagname: " << tagName + << ", spaceId " << spaceId << " error: " + << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } + auto iter = nebula::value(retPre).get(); + if (!iter->valid()) { + LOG(ERROR) << "Tag could not be found, spaceId " << spaceId + << ", tagname: " << tagName; 
handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); onFinished(); return; @@ -46,12 +59,13 @@ void AlterTagProcessor::process(const cpp2::AlterTagReq& req) { auto& tagItems = req.get_tag_items(); auto iCode = getIndexes(spaceId, tagId); - if (!iCode.ok()) { - handleErrorCode(MetaCommon::to(iCode.status())); + if (!nebula::ok(iCode)) { + handleErrorCode(nebula::error(iCode)); onFinished(); return; } - auto indexes = std::move(iCode).value(); + + auto indexes = std::move(nebula::value(iCode)); auto existIndex = !indexes.empty(); if (existIndex) { auto iStatus = indexCheck(indexes, tagItems); @@ -98,11 +112,10 @@ void AlterTagProcessor::process(const cpp2::AlterTagReq& req) { schema.set_columns(std::move(columns)); std::vector data; - LOG(INFO) << "Alter Tag " << req.get_tag_name() << ", tagId " << tagId; + LOG(INFO) << "Alter Tag " << tagName << ", tagId " << tagId; data.emplace_back(MetaServiceUtils::schemaTagKey(spaceId, tagId, version), - MetaServiceUtils::schemaVal(req.get_tag_name(), schema)); + MetaServiceUtils::schemaVal(tagName, schema)); resp_.set_id(to(tagId, EntryType::TAG)); - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); doSyncPutAndUpdate(std::move(data)); } diff --git a/src/meta/processors/schemaMan/CreateEdgeProcessor.cpp b/src/meta/processors/schemaMan/CreateEdgeProcessor.cpp index f095ae643..787c44e00 100644 --- a/src/meta/processors/schemaMan/CreateEdgeProcessor.cpp +++ b/src/meta/processors/schemaMan/CreateEdgeProcessor.cpp @@ -11,20 +11,30 @@ namespace nebula { namespace meta { void CreateEdgeProcessor::process(const cpp2::CreateEdgeReq& req) { - CHECK_SPACE_ID_AND_RETURN(req.get_space_id()); + GraphSpaceID spaceId = req.get_space_id(); + CHECK_SPACE_ID_AND_RETURN(spaceId); auto edgeName = req.get_edge_name(); { - // if there is an edge of the same name + // if there is an tag of the same name // TODO: there exists race condition, we should address it in the future folly::SharedMutex::ReadHolder rHolder(LockUtils::edgeLock()); - auto conflictRet = 
getTagId(req.get_space_id(), edgeName); - if (conflictRet.ok()) { + auto conflictRet = getTagId(spaceId, edgeName); + if (nebula::ok(conflictRet)) { LOG(ERROR) << "Failed to create edge `" << edgeName << "': some edge with the same name already exists."; - resp_.set_id(to(conflictRet.value(), EntryType::EDGE)); + resp_.set_id(to(nebula::value(conflictRet), EntryType::EDGE)); handleErrorCode(cpp2::ErrorCode::E_CONFLICT); onFinished(); return; + } else { + auto retCode = nebula::error(conflictRet); + if (retCode != cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Failed to create edge " << edgeName << " error " + << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } } } @@ -40,16 +50,26 @@ void CreateEdgeProcessor::process(const cpp2::CreateEdgeReq& req) { schema.set_schema_prop(req.get_schema().get_schema_prop()); folly::SharedMutex::WriteHolder wHolder(LockUtils::edgeLock()); - auto ret = getEdgeType(req.get_space_id(), edgeName); - if (ret.ok()) { + auto ret = getEdgeType(spaceId, edgeName); + if (nebula::ok(ret)) { if (req.get_if_not_exists()) { handleErrorCode(cpp2::ErrorCode::SUCCEEDED); } else { + LOG(ERROR) << "Create Edge Failed :" << edgeName << " has existed"; handleErrorCode(cpp2::ErrorCode::E_EXISTED); } - resp_.set_id(to(ret.value(), EntryType::EDGE)); + resp_.set_id(to(nebula::value(ret), EntryType::EDGE)); onFinished(); return; + } else { + auto retCode = nebula::error(ret); + if (retCode != cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Failed to create edge " << edgeName << " error " + << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } } auto edgeTypeRet = autoIncrementId(); @@ -59,19 +79,18 @@ void CreateEdgeProcessor::process(const cpp2::CreateEdgeReq& req) { onFinished(); return; } + auto edgeType = nebula::value(edgeTypeRet); std::vector data; - data.emplace_back(MetaServiceUtils::indexEdgeKey(req.get_space_id(), edgeName), + data.emplace_back(MetaServiceUtils::indexEdgeKey(spaceId, 
edgeName), std::string(reinterpret_cast(&edgeType), sizeof(EdgeType))); - data.emplace_back(MetaServiceUtils::schemaEdgeKey(req.get_space_id(), edgeType, 0), + data.emplace_back(MetaServiceUtils::schemaEdgeKey(spaceId, edgeType, 0), MetaServiceUtils::schemaVal(edgeName, schema)); LOG(INFO) << "Create Edge " << edgeName << ", edgeType " << edgeType; - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); resp_.set_id(to(edgeType, EntryType::EDGE)); doSyncPutAndUpdate(std::move(data)); } } // namespace meta } // namespace nebula - diff --git a/src/meta/processors/schemaMan/CreateTagProcessor.cpp b/src/meta/processors/schemaMan/CreateTagProcessor.cpp index f37fe19e8..720f5c8d1 100644 --- a/src/meta/processors/schemaMan/CreateTagProcessor.cpp +++ b/src/meta/processors/schemaMan/CreateTagProcessor.cpp @@ -11,20 +11,30 @@ namespace nebula { namespace meta { void CreateTagProcessor::process(const cpp2::CreateTagReq& req) { - CHECK_SPACE_ID_AND_RETURN(req.get_space_id()); + GraphSpaceID spaceId = req.get_space_id(); + CHECK_SPACE_ID_AND_RETURN(spaceId); auto tagName = req.get_tag_name(); { // if there is an edge of the same name // TODO: there exists race condition, we should address it in the future folly::SharedMutex::ReadHolder rHolder(LockUtils::edgeLock()); - auto conflictRet = getEdgeType(req.get_space_id(), tagName); - if (conflictRet.ok()) { + auto conflictRet = getEdgeType(spaceId, tagName); + if (nebula::ok(conflictRet)) { LOG(ERROR) << "Failed to create tag `" << tagName << "': some edge with the same name already exists."; - resp_.set_id(to(conflictRet.value(), EntryType::TAG)); + resp_.set_id(to(nebula::value(conflictRet), EntryType::TAG)); handleErrorCode(cpp2::ErrorCode::E_CONFLICT); onFinished(); return; + } else { + auto retCode = nebula::error(conflictRet); + if (retCode != cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Failed to create tag " << tagName << " error " + << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } } } @@ 
-40,35 +50,45 @@ void CreateTagProcessor::process(const cpp2::CreateTagReq& req) { schema.set_schema_prop(req.get_schema().get_schema_prop()); folly::SharedMutex::WriteHolder wHolder(LockUtils::tagLock()); - auto ret = getTagId(req.get_space_id(), tagName); - if (ret.ok()) { + auto ret = getTagId(spaceId, tagName); + if (nebula::ok(ret)) { if (req.get_if_not_exists()) { handleErrorCode(cpp2::ErrorCode::SUCCEEDED); } else { LOG(ERROR) << "Create Tag Failed :" << tagName << " has existed"; handleErrorCode(cpp2::ErrorCode::E_EXISTED); } - resp_.set_id(to(ret.value(), EntryType::TAG)); + resp_.set_id(to(nebula::value(ret), EntryType::TAG)); onFinished(); return; + } else { + auto retCode = nebula::error(ret); + if (retCode != cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Failed to create tag " << tagName << " error " + << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } } auto tagRet = autoIncrementId(); if (!nebula::ok(tagRet)) { - LOG(ERROR) << "Create tag failed : Get tag id failed"; + LOG(ERROR) << "Create tag failed : Get tag id failed."; handleErrorCode(nebula::error(tagRet)); onFinished(); return; } + auto tagId = nebula::value(tagRet); std::vector data; - data.emplace_back(MetaServiceUtils::indexTagKey(req.get_space_id(), tagName), + data.emplace_back(MetaServiceUtils::indexTagKey(spaceId, tagName), std::string(reinterpret_cast(&tagId), sizeof(TagID))); - data.emplace_back(MetaServiceUtils::schemaTagKey(req.get_space_id(), tagId, 0), + data.emplace_back(MetaServiceUtils::schemaTagKey(spaceId, tagId, 0), MetaServiceUtils::schemaVal(tagName, schema)); LOG(INFO) << "Create Tag " << tagName << ", TagID " << tagId; - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); + resp_.set_id(to(tagId, EntryType::TAG)); doSyncPutAndUpdate(std::move(data)); } diff --git a/src/meta/processors/schemaMan/DropEdgeProcessor.cpp b/src/meta/processors/schemaMan/DropEdgeProcessor.cpp index 008f6a43f..b47443f6b 100644 --- 
a/src/meta/processors/schemaMan/DropEdgeProcessor.cpp +++ b/src/meta/processors/schemaMan/DropEdgeProcessor.cpp @@ -10,60 +10,73 @@ namespace nebula { namespace meta { void DropEdgeProcessor::process(const cpp2::DropEdgeReq& req) { - CHECK_SPACE_ID_AND_RETURN(req.get_space_id()); GraphSpaceID spaceId = req.get_space_id(); + CHECK_SPACE_ID_AND_RETURN(spaceId); + folly::SharedMutex::ReadHolder rHolder(LockUtils::snapshotLock()); folly::SharedMutex::WriteHolder wHolder(LockUtils::edgeLock()); + auto edgeName = req.get_edge_name(); EdgeType edgeType; - auto indexKey = MetaServiceUtils::indexEdgeKey(spaceId, req.get_edge_name()); + auto indexKey = MetaServiceUtils::indexEdgeKey(spaceId, edgeName); auto iRet = doGet(indexKey); - if (iRet.ok()) { - edgeType = *reinterpret_cast(iRet.value().data()); + if (nebula::ok(iRet)) { + edgeType = *reinterpret_cast(nebula::value(iRet).c_str()); resp_.set_id(to(edgeType, EntryType::EDGE)); } else { - handleErrorCode(req.get_if_exists() == true ? cpp2::ErrorCode::SUCCEEDED - : cpp2::ErrorCode::E_NOT_FOUND); + auto retCode = nebula::error(iRet); + if (retCode == cpp2::ErrorCode::E_NOT_FOUND) { + if (req.get_if_exists()) { + retCode = cpp2::ErrorCode::SUCCEEDED; + } else { + LOG(ERROR) << "Drop edge failed :" << edgeName << " not found."; + } + } else { + LOG(ERROR) << "Get edgetype failed, edge name " << edgeName + << " error: " << static_cast(retCode); + } + handleErrorCode(retCode); onFinished(); return; } auto indexes = getIndexes(spaceId, edgeType); - if (!indexes.ok()) { - handleErrorCode(MetaCommon::to(indexes.status())); + if (!nebula::ok(indexes)) { + handleErrorCode(nebula::error(indexes)); onFinished(); return; } - if (!indexes.value().empty()) { - LOG(ERROR) << "Drop edge error, index conflict"; + if (!nebula::value(indexes).empty()) { + LOG(ERROR) << "Drop edge error, index conflict, please delete index first."; handleErrorCode(cpp2::ErrorCode::E_CONFLICT); onFinished(); return; } - auto ret = 
getEdgeKeys(req.get_space_id(), edgeType); - if (!ret.ok()) { - handleErrorCode(MetaCommon::to(ret.status())); + auto ret = getEdgeKeys(spaceId, edgeType); + if (!nebula::ok(ret)) { + handleErrorCode(nebula::error(ret)); onFinished(); return; } - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); - auto keys = std::move(ret).value(); + + auto keys = nebula::value(ret); keys.emplace_back(std::move(indexKey)); - LOG(INFO) << "Drop Edge " << req.get_edge_name(); + LOG(INFO) << "Drop Edge " << edgeName; doSyncMultiRemoveAndUpdate(std::move(keys)); } -StatusOr> DropEdgeProcessor::getEdgeKeys(GraphSpaceID id, - EdgeType edgeType) { +ErrorOr> +DropEdgeProcessor::getEdgeKeys(GraphSpaceID id, EdgeType edgeType) { std::vector keys; auto key = MetaServiceUtils::schemaEdgePrefix(id, edgeType); auto iterRet = doPrefix(key); - if (!iterRet.ok()) { - return Status::Error("Edge get error by id : %d !", edgeType); + if (!nebula::ok(iterRet)) { + LOG(ERROR) << "Edge schema prefix failed, edgetype " << edgeType; + return nebula::error(iterRet); } - auto iter = iterRet.value().get(); + auto iter = nebula::value(iterRet).get(); while (iter->valid()) { keys.emplace_back(iter->key()); iter->next(); diff --git a/src/meta/processors/schemaMan/DropEdgeProcessor.h b/src/meta/processors/schemaMan/DropEdgeProcessor.h index f70b1946f..35729a0a7 100644 --- a/src/meta/processors/schemaMan/DropEdgeProcessor.h +++ b/src/meta/processors/schemaMan/DropEdgeProcessor.h @@ -24,7 +24,8 @@ class DropEdgeProcessor : public BaseProcessor { explicit DropEdgeProcessor(kvstore::KVStore* kvstore) : BaseProcessor(kvstore) {} - StatusOr> getEdgeKeys(GraphSpaceID id, EdgeType edgeType); + ErrorOr> + getEdgeKeys(GraphSpaceID id, EdgeType edgeType); }; } // namespace meta diff --git a/src/meta/processors/schemaMan/DropTagProcessor.cpp b/src/meta/processors/schemaMan/DropTagProcessor.cpp index 4a1ee7f42..d7ea7a060 100644 --- a/src/meta/processors/schemaMan/DropTagProcessor.cpp +++ 
b/src/meta/processors/schemaMan/DropTagProcessor.cpp @@ -10,61 +10,73 @@ namespace nebula { namespace meta { void DropTagProcessor::process(const cpp2::DropTagReq& req) { - CHECK_SPACE_ID_AND_RETURN(req.get_space_id()); GraphSpaceID spaceId = req.get_space_id(); + CHECK_SPACE_ID_AND_RETURN(spaceId); folly::SharedMutex::ReadHolder rHolder(LockUtils::snapshotLock()); folly::SharedMutex::WriteHolder wHolder(LockUtils::tagLock()); + auto tagName = req.get_tag_name(); TagID tagId; - auto indexKey = MetaServiceUtils::indexTagKey(spaceId, req.get_tag_name()); + auto indexKey = MetaServiceUtils::indexTagKey(spaceId, tagName); auto iRet = doGet(indexKey); - if (iRet.ok()) { - tagId = *reinterpret_cast(iRet.value().data()); + if (nebula::ok(iRet)) { + tagId = *reinterpret_cast(nebula::value(iRet).c_str()); resp_.set_id(to(tagId, EntryType::TAG)); } else { - handleErrorCode(req.get_if_exists() ? cpp2::ErrorCode::SUCCEEDED - : cpp2::ErrorCode::E_NOT_FOUND); + auto retCode = nebula::error(iRet); + if (retCode == cpp2::ErrorCode::E_NOT_FOUND) { + if (req.get_if_exists()) { + retCode = cpp2::ErrorCode::SUCCEEDED; + } else { + LOG(ERROR) << "Drop tag failed :" << tagName << " not found."; + } + } else { + LOG(ERROR) << "Get Tag failed, tag name " << tagName + << " error: " << static_cast(retCode); + } + handleErrorCode(retCode); onFinished(); return; } auto indexes = getIndexes(spaceId, tagId); - if (!indexes.ok()) { - handleErrorCode(MetaCommon::to(indexes.status())); + if (!nebula::ok(indexes)) { + handleErrorCode(nebula::error(indexes)); onFinished(); return; } - if (!indexes.value().empty()) { - LOG(ERROR) << "Drop tag error, index conflict"; + if (!nebula::value(indexes).empty()) { + LOG(ERROR) << "Drop tag error, index conflict, please delete index first."; handleErrorCode(cpp2::ErrorCode::E_CONFLICT); onFinished(); return; } - auto ret = getTagKeys(req.get_space_id(), tagId); - if (!ret.ok()) { - LOG(ERROR) << "Drop Tag Failed : " << req.get_tag_name() << " not found"; - 
handleErrorCode(MetaCommon::to(ret.status())); + auto ret = getTagKeys(spaceId, tagId); + if (!nebula::ok(ret)) { + handleErrorCode(nebula::error(ret)); onFinished(); return; } - auto keys = std::move(ret).value(); + + auto keys = nebula::value(ret); keys.emplace_back(indexKey); - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); - LOG(INFO) << "Drop Tag " << req.get_tag_name(); + LOG(INFO) << "Drop Tag " << tagName; doSyncMultiRemoveAndUpdate(std::move(keys)); } -StatusOr> DropTagProcessor::getTagKeys(GraphSpaceID id, TagID tagId) { +ErrorOr> +DropTagProcessor::getTagKeys(GraphSpaceID id, TagID tagId) { std::vector keys; auto key = MetaServiceUtils::schemaTagPrefix(id, tagId); auto iterRet = doPrefix(key); - if (!iterRet.ok()) { - return Status::Error("Tag get error by id : %d !", tagId); + if (!nebula::ok(iterRet)) { + LOG(ERROR) << "Tag schema prefix failed, tag id " << tagId; + return nebula::error(iterRet); } - auto iter = iterRet.value().get(); + auto iter = nebula::value(iterRet).get(); while (iter->valid()) { keys.emplace_back(iter->key()); iter->next(); diff --git a/src/meta/processors/schemaMan/DropTagProcessor.h b/src/meta/processors/schemaMan/DropTagProcessor.h index 38a9c1a11..9cf82a0dd 100644 --- a/src/meta/processors/schemaMan/DropTagProcessor.h +++ b/src/meta/processors/schemaMan/DropTagProcessor.h @@ -24,7 +24,8 @@ class DropTagProcessor : public BaseProcessor { explicit DropTagProcessor(kvstore::KVStore* kvstore) : BaseProcessor(kvstore) {} - StatusOr> getTagKeys(GraphSpaceID id, TagID tagId); + ErrorOr> + getTagKeys(GraphSpaceID id, TagID tagId); }; } // namespace meta diff --git a/src/meta/processors/schemaMan/GetEdgeProcessor.cpp b/src/meta/processors/schemaMan/GetEdgeProcessor.cpp index d7aff0205..a4d65dd4a 100644 --- a/src/meta/processors/schemaMan/GetEdgeProcessor.cpp +++ b/src/meta/processors/schemaMan/GetEdgeProcessor.cpp @@ -10,50 +10,62 @@ namespace nebula { namespace meta { void GetEdgeProcessor::process(const cpp2::GetEdgeReq& req) { - 
CHECK_SPACE_ID_AND_RETURN(req.get_space_id()); + GraphSpaceID spaceId = req.get_space_id(); + CHECK_SPACE_ID_AND_RETURN(spaceId); + auto edgeName = req.get_edge_name(); + auto ver = req.get_version(); + folly::SharedMutex::ReadHolder rHolder(LockUtils::edgeLock()); - auto edgeTypeRet = getEdgeType(req.get_space_id(), req.get_edge_name()); - if (!edgeTypeRet.ok()) { - handleErrorCode(MetaCommon::to(edgeTypeRet.status())); + auto edgeTypeRet = getEdgeType(spaceId, edgeName); + if (!nebula::ok(edgeTypeRet)) { + LOG(ERROR) << "Get edge " << edgeName << " failed."; + handleErrorCode(nebula::error(edgeTypeRet)); onFinished(); return; } - auto edgeType = edgeTypeRet.value(); + auto edgeType = nebula::value(edgeTypeRet); std::string schemaValue; // Get the lastest version - if (req.get_version() < 0) { - auto edgePrefix = MetaServiceUtils::schemaEdgePrefix(req.get_space_id(), edgeType); + if (ver < 0) { + auto edgePrefix = MetaServiceUtils::schemaEdgePrefix(spaceId, edgeType); auto ret = doPrefix(edgePrefix); - if (!ret.ok()) { - LOG(ERROR) << "Get Edge SpaceID: " << req.get_space_id() << ", edgeName: " - << req.get_edge_name() << ", version " << req.get_version() << " not found"; + if (!nebula::ok(ret)) { + LOG(ERROR) << "Get Edge SpaceID: " << spaceId << ", edgeName: " + << edgeName << ", latest version failed."; + handleErrorCode(nebula::error(ret)); + onFinished(); + return; + } + auto iter = nebula::value(ret).get(); + if (!iter->valid()) { + LOG(ERROR) << "Get Edge SpaceID: " << spaceId << ", edgeName: " + << edgeName << ", latest version " << " not found."; handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); onFinished(); return; } - schemaValue = ret.value()->val().str(); + schemaValue = iter->val().str(); } else { - auto edgeKey = MetaServiceUtils::schemaEdgeKey(req.get_space_id(), - edgeType, - req.get_version()); + auto edgeKey = MetaServiceUtils::schemaEdgeKey(spaceId, edgeType, ver); auto ret = doGet(edgeKey); - if (!ret.ok()) { - LOG(ERROR) << "Get Edge SpaceID: " 
<< req.get_space_id() << ", edgeName: " - << req.get_edge_name() << ", version " << req.get_version() << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(ret)) { + LOG(ERROR) << "Get Edge SpaceID: " << spaceId << ", edgeName: " + << edgeName << ", version " << ver << " failed."; + handleErrorCode(nebula::error(ret)); onFinished(); return; } - schemaValue = ret.value(); + schemaValue = nebula::value(ret); } - VLOG(3) << "Get Edge SpaceID: " << req.get_space_id() << ", edgeName: " - << req.get_edge_name() << ", version " << req.get_version(); + VLOG(3) << "Get Edge SpaceID: " << spaceId << ", edgeName: " + << edgeName << ", version " << ver; handleErrorCode(cpp2::ErrorCode::SUCCEEDED); resp_.set_schema(MetaServiceUtils::parseSchema(schemaValue)); onFinished(); } + } // namespace meta } // namespace nebula diff --git a/src/meta/processors/schemaMan/GetTagProcessor.cpp b/src/meta/processors/schemaMan/GetTagProcessor.cpp index 2bc78a325..03906c42c 100644 --- a/src/meta/processors/schemaMan/GetTagProcessor.cpp +++ b/src/meta/processors/schemaMan/GetTagProcessor.cpp @@ -10,54 +10,63 @@ namespace nebula { namespace meta { void GetTagProcessor::process(const cpp2::GetTagReq& req) { - CHECK_SPACE_ID_AND_RETURN(req.get_space_id()); + GraphSpaceID spaceId = req.get_space_id(); + CHECK_SPACE_ID_AND_RETURN(spaceId); + auto tagName = req.get_tag_name(); + auto ver = req.get_version(); + folly::SharedMutex::ReadHolder rHolder(LockUtils::tagLock()); - auto tagIdRet = getTagId(req.get_space_id(), req.get_tag_name()); - if (!tagIdRet.ok()) { - handleErrorCode(MetaCommon::to(tagIdRet.status())); + auto tagIdRet = getTagId(spaceId, tagName); + if (!nebula::ok(tagIdRet)) { + LOG(ERROR) << "Get tag " << tagName << " failed."; + handleErrorCode(nebula::error(tagIdRet)); onFinished(); return; } - auto tagId = tagIdRet.value(); + auto tagId = nebula::value(tagIdRet); std::string schemaValue; // Get the lastest version - if (req.get_version() < 0) { - auto 
tagPrefix = MetaServiceUtils::schemaTagPrefix(req.get_space_id(), tagId); + if (ver < 0) { + auto tagPrefix = MetaServiceUtils::schemaTagPrefix(spaceId, tagId); auto ret = doPrefix(tagPrefix); - if (!ret.ok()) { - LOG(ERROR) << "Get Tag SpaceID: " << req.get_space_id() - << ", tagName: " << req.get_tag_name() - << ", version " << req.get_version() << " not found"; + if (!nebula::ok(ret)) { + LOG(ERROR) << "Get Tag SpaceID: " << spaceId << ", tagName: " << tagName + << ", latest version failed."; + handleErrorCode(nebula::error(ret)); + onFinished(); + return; + } + auto iter = nebula::value(ret).get(); + if (!iter->valid()) { + LOG(ERROR) << "Get Tag SpaceID: " << spaceId << ", tagName: " + << tagName << ", latest version " << " not found."; handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); onFinished(); return; } - schemaValue = ret.value()->val().str(); + schemaValue = iter->val().str(); } else { - auto tagKey = MetaServiceUtils::schemaTagKey(req.get_space_id(), - tagId, - req.get_version()); + auto tagKey = MetaServiceUtils::schemaTagKey(spaceId, tagId, ver); auto ret = doGet(tagKey); - if (!ret.ok()) { - LOG(ERROR) << "Get Tag SpaceID: " << req.get_space_id() - << ", tagName: " << req.get_tag_name() - << ", version " << req.get_version() << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(ret)) { + LOG(ERROR) << "Get Tag SpaceID: " << spaceId << ", tagName: " << tagName + << ", version " << ver << " failed."; + handleErrorCode(nebula::error(ret)); onFinished(); return; } - schemaValue = ret.value(); + schemaValue = nebula::value(ret); } - VLOG(3) << "Get Tag SpaceID: " << req.get_space_id() - << ", tagName: " << req.get_tag_name() - << ", version " << req.get_version(); + VLOG(3) << "Get Tag SpaceID: " << spaceId << ", tagName: " << tagName + << ", version " << ver; handleErrorCode(cpp2::ErrorCode::SUCCEEDED); resp_.set_schema(MetaServiceUtils::parseSchema(schemaValue)); onFinished(); } + } // namespace meta } // namespace nebula 
diff --git a/src/meta/processors/schemaMan/ListEdgesProcessor.cpp b/src/meta/processors/schemaMan/ListEdgesProcessor.cpp index ac29c7110..c9c25afcb 100644 --- a/src/meta/processors/schemaMan/ListEdgesProcessor.cpp +++ b/src/meta/processors/schemaMan/ListEdgesProcessor.cpp @@ -10,18 +10,20 @@ namespace nebula { namespace meta { void ListEdgesProcessor::process(const cpp2::ListEdgesReq& req) { - CHECK_SPACE_ID_AND_RETURN(req.get_space_id()); + GraphSpaceID spaceId = req.get_space_id(); + CHECK_SPACE_ID_AND_RETURN(spaceId); + folly::SharedMutex::ReadHolder rHolder(LockUtils::edgeLock()); - auto spaceId = req.get_space_id(); auto prefix = MetaServiceUtils::schemaEdgesPrefix(spaceId); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - handleErrorCode(MetaCommon::to(ret)); - if (ret != kvstore::ResultCode::SUCCEEDED) { + auto ret = doPrefix(prefix); + if (!nebula::ok(ret)) { + LOG(ERROR) << "List Edges failed, SpaceID: " << spaceId; + handleErrorCode(nebula::error(ret)); onFinished(); return; } + auto iter = nebula::value(ret).get(); std::vector edges; while (iter->valid()) { auto key = iter->key(); @@ -43,6 +45,7 @@ void ListEdgesProcessor::process(const cpp2::ListEdgesReq& req) { handleErrorCode(cpp2::ErrorCode::SUCCEEDED); onFinished(); } + } // namespace meta } // namespace nebula diff --git a/src/meta/processors/schemaMan/ListTagsProcessor.cpp b/src/meta/processors/schemaMan/ListTagsProcessor.cpp index 75e13139b..90c5fe30a 100644 --- a/src/meta/processors/schemaMan/ListTagsProcessor.cpp +++ b/src/meta/processors/schemaMan/ListTagsProcessor.cpp @@ -10,17 +10,20 @@ namespace nebula { namespace meta { void ListTagsProcessor::process(const cpp2::ListTagsReq& req) { - CHECK_SPACE_ID_AND_RETURN(req.get_space_id()); + GraphSpaceID spaceId = req.get_space_id(); + CHECK_SPACE_ID_AND_RETURN(spaceId); + folly::SharedMutex::ReadHolder rHolder(LockUtils::tagLock()); - auto spaceId = req.get_space_id(); auto prefix = 
MetaServiceUtils::schemaTagsPrefix(spaceId); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - handleErrorCode(MetaCommon::to(ret)); - if (ret != kvstore::ResultCode::SUCCEEDED) { + auto ret = doPrefix(prefix); + if (!nebula::ok(ret)) { + LOG(ERROR) << "List Tags failed, SpaceID: " << spaceId; + handleErrorCode(nebula::error(ret)); onFinished(); return; } + + auto iter = nebula::value(ret).get(); std::vector tags; while (iter->valid()) { auto key = iter->key(); @@ -42,5 +45,6 @@ void ListTagsProcessor::process(const cpp2::ListTagsReq& req) { handleErrorCode(cpp2::ErrorCode::SUCCEEDED); onFinished(); } + } // namespace meta } // namespace nebula diff --git a/src/meta/processors/usersMan/AuthenticationProcessor.cpp b/src/meta/processors/usersMan/AuthenticationProcessor.cpp index c22e90546..7a9b2555b 100644 --- a/src/meta/processors/usersMan/AuthenticationProcessor.cpp +++ b/src/meta/processors/usersMan/AuthenticationProcessor.cpp @@ -13,17 +13,20 @@ void CreateUserProcessor::process(const cpp2::CreateUserReq& req) { folly::SharedMutex::WriteHolder wHolder(LockUtils::userLock()); const auto& account = req.get_account(); const auto& password = req.get_encoded_pwd(); - auto ret = userExist(account); - if (ret.ok()) { - cpp2::ErrorCode code; - if (req.get_if_not_exists()) { - code = cpp2::ErrorCode::SUCCEEDED; + + auto retCode = userExist(account); + if (retCode != cpp2::ErrorCode::E_NOT_FOUND) { + if (retCode == cpp2::ErrorCode::SUCCEEDED) { + if (!req.get_if_not_exists()) { + LOG(ERROR) << "Create User Failed : User " << account + << " already existed!"; + retCode = cpp2::ErrorCode::E_EXISTED; + } } else { LOG(ERROR) << "Create User Failed : User " << account - << " already existed!"; - code = cpp2::ErrorCode::E_EXISTED; + << " error: " << static_cast(retCode); } - handleErrorCode(code); + handleErrorCode(retCode); onFinished(); return; } @@ -31,7 +34,6 @@ void CreateUserProcessor::process(const 
cpp2::CreateUserReq& req) { std::vector data; data.emplace_back(MetaServiceUtils::userKey(account), MetaServiceUtils::userVal(password)); - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); doSyncPutAndUpdate(std::move(data)); } @@ -42,16 +44,19 @@ void AlterUserProcessor::process(const cpp2::AlterUserReq& req) { const auto& password = req.get_encoded_pwd(); auto userKey = MetaServiceUtils::userKey(account); auto userVal = MetaServiceUtils::userVal(password); - std::string val; - auto result = kvstore_->get(kDefaultSpaceId, kDefaultPartId, userKey, &val); - if (result != kvstore::ResultCode::SUCCEEDED) { - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + + auto iRet = doGet(userKey); + if (!nebula::ok(iRet)) { + auto errCode = nebula::error(iRet); + LOG(ERROR) << "Get User Failed : User " << account + << " error: " << static_cast(errCode); + handleErrorCode(errCode); onFinished(); return; } + std::vector data; data.emplace_back(std::move(userKey), std::move(userVal)); - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); doSyncPutAndUpdate(std::move(data)); } @@ -59,37 +64,51 @@ void AlterUserProcessor::process(const cpp2::AlterUserReq& req) { void DropUserProcessor::process(const cpp2::DropUserReq& req) { folly::SharedMutex::WriteHolder wHolder(LockUtils::userLock()); const auto& account = req.get_account(); - auto ret = userExist(account); - if (!ret.ok()) { - if (req.get_if_exists()) { - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); + + auto retCode = userExist(account); + if (retCode != cpp2::ErrorCode::SUCCEEDED) { + if (retCode == cpp2::ErrorCode::E_NOT_FOUND) { + if (req.get_if_exists()) { + retCode = cpp2::ErrorCode::SUCCEEDED; + } else { + LOG(ERROR) << "Drop User Failed :" << account << " not found."; + } } else { - LOG(ERROR) << "Drop User Failed :" << account << " not found."; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + LOG(ERROR) << "Drop User Failed : User " << account + << " error: " << static_cast(retCode); } + handleErrorCode(retCode); onFinished(); 
return; } + std::vector keys; keys.emplace_back(MetaServiceUtils::userKey(account)); // Collect related roles by user. - std::unique_ptr iter; auto prefix = MetaServiceUtils::rolesPrefix(); - auto userRet = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (userRet == kvstore::ResultCode::SUCCEEDED) { - while (iter->valid()) { - auto key = iter->key(); - auto user = MetaServiceUtils::parseRoleUser(key); - if (user == account) { - keys.emplace_back(key); - } - iter->next(); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + retCode = nebula::error(iterRet); + // The error of prefix is leader change + LOG(ERROR) << "Drop User Failed : User " << account + << " error: " << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } + + auto iter = nebula::value(iterRet).get(); + while (iter->valid()) { + auto key = iter->key(); + auto user = MetaServiceUtils::parseRoleUser(key); + if (user == account) { + keys.emplace_back(key); } + iter->next(); } - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); - LOG(INFO) << "Drop User " << req.get_account(); + LOG(INFO) << "Drop User " << account; doSyncMultiRemoveAndUpdate({std::move(keys)}); } @@ -99,6 +118,8 @@ void GrantProcessor::process(const cpp2::GrantRoleReq& req) { folly::SharedMutex::ReadHolder spaceHolder(LockUtils::spaceLock()); const auto& roleItem = req.get_role_item(); auto spaceId = roleItem.get_space_id(); + const auto& account = roleItem.get_user_id(); + /** * for cloud authority, need init a god user by this interface. the god user default grant to * meta space (kDefaultSpaceId). so skip the space check. 
@@ -108,17 +129,18 @@ void GrantProcessor::process(const cpp2::GrantRoleReq& req) { if (!(spaceId == kDefaultSpaceId && roleItem.get_role_type() == cpp2::RoleType::GOD)) { CHECK_SPACE_ID_AND_RETURN(spaceId); } - auto userRet = userExist(roleItem.get_user_id()); - if (!userRet.ok()) { - handleErrorCode(MetaCommon::to(userRet)); + auto retCode = userExist(account); + if (retCode != cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Grant User Failed : User " << account + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } std::vector data; - data.emplace_back(MetaServiceUtils::roleKey(spaceId, roleItem.get_user_id()), + data.emplace_back(MetaServiceUtils::roleKey(spaceId, account), MetaServiceUtils::roleVal(roleItem.get_role_type())); - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); doSyncPutAndUpdate(std::move(data)); } @@ -129,52 +151,73 @@ void RevokeProcessor::process(const cpp2::RevokeRoleReq& req) { const auto& roleItem = req.get_role_item(); auto spaceId = roleItem.get_space_id(); CHECK_SPACE_ID_AND_RETURN(spaceId); - auto userRet = userExist(roleItem.get_user_id()); - if (!userRet.ok()) { - handleErrorCode(MetaCommon::to(userRet)); + const auto& account = roleItem.get_user_id(); + + auto userRet = userExist(account); + if (userRet != cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Revoke User Failed : User " << account + << " error: " << static_cast(userRet); + handleErrorCode(userRet); onFinished(); return; } - auto roleKey = MetaServiceUtils::roleKey(spaceId, roleItem.get_user_id()); + auto roleKey = MetaServiceUtils::roleKey(spaceId, account); auto result = doGet(roleKey); - if (!result.ok()) { - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(result)) { + userRet = nebula::error(result); + LOG(ERROR) << "Get Role User Failed : User " << account + << " error: " << static_cast(userRet); + handleErrorCode(userRet); onFinished(); return; } - auto val = result.value(); + auto val = nebula::value(result); const 
auto role = *reinterpret_cast(val.c_str()); if (role != roleItem.get_role_type()) { + LOG(ERROR) << "Revoke User Failed : User " << account + << " error: " << static_cast(cpp2::ErrorCode::E_IMPROPER_ROLE); handleErrorCode(cpp2::ErrorCode::E_IMPROPER_ROLE); onFinished(); return; } - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); doSyncMultiRemoveAndUpdate({std::move(roleKey)}); } void ChangePasswordProcessor::process(const cpp2::ChangePasswordReq& req) { folly::SharedMutex::WriteHolder wHolder(LockUtils::userLock()); - auto userRet = userExist(req.get_account()); - if (!userRet.ok()) { - handleErrorCode(MetaCommon::to(userRet)); + const auto& account = req.get_account(); + auto userRet = userExist(account); + if (userRet != cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Change password Failed, get user " << account << " failed, " + << " error: " << static_cast(userRet); + handleErrorCode(userRet); onFinished(); return; } - if (!checkPassword(req.get_account(), req.get_old_encoded_pwd())) { - handleErrorCode(cpp2::ErrorCode::E_INVALID_PASSWORD); + auto checkRet = checkPassword(account, req.get_old_encoded_pwd()); + if (!nebula::ok(checkRet)) { + auto retCode = nebula::error(checkRet); + LOG(ERROR) << "Get user " << account << " failed, " + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; + } else { + if (!nebula::value(checkRet)) { + LOG(ERROR) << "Check password failed, user " << account; + handleErrorCode(cpp2::ErrorCode::E_INVALID_PASSWORD); + onFinished(); + return; + } } - auto userKey = MetaServiceUtils::userKey(req.get_account()); + auto userKey = MetaServiceUtils::userKey(account); auto userVal = MetaServiceUtils::userVal(req.get_new_encoded_pwd()); std::vector data; data.emplace_back(std::move(userKey), std::move(userVal)); - handleErrorCode(cpp2::ErrorCode::SUCCEEDED); doSyncPutAndUpdate(std::move(data)); } @@ -182,23 +225,25 @@ void ChangePasswordProcessor::process(const cpp2::ChangePasswordReq& req) { void 
ListUsersProcessor::process(const cpp2::ListUsersReq& req) { UNUSED(req); folly::SharedMutex::ReadHolder rHolder(LockUtils::userLock()); - std::unique_ptr iter; std::string prefix = "__users__"; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Can't find any users."; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + auto ret = doPrefix(prefix); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "List User failed, error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + + auto iter = nebula::value(ret).get(); std::unordered_map users; while (iter->valid()) { auto account = MetaServiceUtils::parseUser(iter->key()); auto password = MetaServiceUtils::parseUserPwd(iter->val()); - users.emplace(std::pair(std::move(account), std::move(password))); + users.emplace(std::move(account), std::move(password)); iter->next(); } - resp_.set_users(users); + resp_.set_users(std::move(users)); handleErrorCode(cpp2::ErrorCode::SUCCEEDED); onFinished(); } @@ -207,17 +252,19 @@ void ListUsersProcessor::process(const cpp2::ListUsersReq& req) { void ListRolesProcessor::process(const cpp2::ListRolesReq& req) { auto spaceId = req.get_space_id(); CHECK_SPACE_ID_AND_RETURN(spaceId); + folly::SharedMutex::ReadHolder rHolder(LockUtils::userLock()); auto prefix = MetaServiceUtils::roleSpacePrefix(spaceId); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Can't find any roles by space id " << spaceId; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + auto ret = doPrefix(prefix); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "List roles failed, error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto iter = nebula::value(ret).get(); std::vector roles; while 
(iter->valid()) { auto account = MetaServiceUtils::parseRoleUser(iter->key()); @@ -226,41 +273,45 @@ void ListRolesProcessor::process(const cpp2::ListRolesReq& req) { role.set_user_id(std::move(account)); role.set_space_id(spaceId); role.set_role_type(*reinterpret_cast(val.begin())); - roles.emplace_back(role); + roles.emplace_back(std::move(role)); iter->next(); } - resp_.set_roles(roles); + resp_.set_roles(std::move(roles)); handleErrorCode(cpp2::ErrorCode::SUCCEEDED); onFinished(); } void GetUserRolesProcessor::process(const cpp2::GetUserRolesReq& req) { folly::SharedMutex::WriteHolder wHolder(LockUtils::userLock()); + const auto& act = req.get_account(); + auto prefix = MetaServiceUtils::rolesPrefix(); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Can't find any roles by user " << req.get_account(); - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + auto ret = doPrefix(prefix); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "Prefix roles failed, error: " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto iter = nebula::value(ret).get(); std::vector roles; while (iter->valid()) { auto account = MetaServiceUtils::parseRoleUser(iter->key()); auto spaceId = MetaServiceUtils::parseRoleSpace(iter->key()); - if (account == req.get_account()) { + if (account == act) { auto val = iter->val(); cpp2::RoleItem role; role.set_user_id(std::move(account)); role.set_space_id(spaceId); role.set_role_type(*reinterpret_cast(val.begin())); - roles.emplace_back(role); + roles.emplace_back(std::move(role)); } iter->next(); } - resp_.set_roles(roles); + resp_.set_roles(std::move(roles)); handleErrorCode(cpp2::ErrorCode::SUCCEEDED); onFinished(); } diff --git a/src/meta/processors/usersMan/AuthenticationProcessor.h b/src/meta/processors/usersMan/AuthenticationProcessor.h index 9d738124b..122be5ec0 
100644 --- a/src/meta/processors/usersMan/AuthenticationProcessor.h +++ b/src/meta/processors/usersMan/AuthenticationProcessor.h @@ -134,6 +134,8 @@ class GetUserRolesProcessor : public BaseProcessor { explicit GetUserRolesProcessor(kvstore::KVStore* kvstore) : BaseProcessor(kvstore) {} }; + } // namespace meta -} // namespace nebula +} // namespace nebula + #endif // META_AUTHENTICATIONPROCESSOR_H diff --git a/src/meta/processors/zoneMan/AddGroupProcessor.cpp b/src/meta/processors/zoneMan/AddGroupProcessor.cpp index 14d476c4a..5bafab76f 100644 --- a/src/meta/processors/zoneMan/AddGroupProcessor.cpp +++ b/src/meta/processors/zoneMan/AddGroupProcessor.cpp @@ -28,18 +28,28 @@ void AddGroupProcessor::process(const cpp2::AddGroupReq& req) { return; } - if (!checkGroupRedundancy(zoneNames)) { - handleErrorCode(cpp2::ErrorCode::E_INVALID_PARM); + auto retCode = checkGroupRedundancy(zoneNames); + if (retCode != cpp2::ErrorCode::SUCCEEDED) { + handleErrorCode(retCode); onFinished(); return; } auto groupRet = getGroupId(groupName); - if (groupRet.ok()) { + if (nebula::ok(groupRet)) { LOG(ERROR) << "Group " << groupName << " already existed"; handleErrorCode(cpp2::ErrorCode::E_EXISTED); onFinished(); return; + } else { + retCode = nebula::error(groupRet); + if (retCode != cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Create Group failed, group name " << groupName << " error: " + << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } } auto groupIdRet = autoIncrementId(); @@ -61,14 +71,15 @@ void AddGroupProcessor::process(const cpp2::AddGroupReq& req) { doSyncPutAndUpdate(std::move(data)); } -bool AddGroupProcessor::checkGroupRedundancy(std::vector zones) { - auto prefix = MetaServiceUtils::groupPrefix(); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get groups failed"; - return false; +cpp2::ErrorCode
AddGroupProcessor::checkGroupRedundancy(std::vector zones) { + const auto& prefix = MetaServiceUtils::groupPrefix(); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "Get groups failed, error: " << static_cast(retCode); + return retCode; } + auto iter = nebula::value(iterRet).get(); std::sort(zones.begin(), zones.end()); while (iter->valid()) { @@ -78,11 +89,11 @@ bool AddGroupProcessor::checkGroupRedundancy(std::vector zones) { if (zones == zoneNames) { LOG(ERROR) << "Group " << groupName << " have created, although the zones order maybe not the same"; - return false; + return cpp2::ErrorCode::E_EXISTED; } iter->next(); } - return true; + return cpp2::ErrorCode::SUCCEEDED; } } // namespace meta diff --git a/src/meta/processors/zoneMan/AddGroupProcessor.h b/src/meta/processors/zoneMan/AddGroupProcessor.h index a892c8486..81889bdb3 100644 --- a/src/meta/processors/zoneMan/AddGroupProcessor.h +++ b/src/meta/processors/zoneMan/AddGroupProcessor.h @@ -24,10 +24,11 @@ class AddGroupProcessor : public BaseProcessor { explicit AddGroupProcessor(kvstore::KVStore* kvstore) : BaseProcessor(kvstore) {} - bool checkGroupRedundancy(std::vector zones); + cpp2::ErrorCode checkGroupRedundancy(std::vector zones); }; } // namespace meta -} // namespace nebula +} // namespace nebula + #endif // META_ADDGROUPPROCESSOR_H diff --git a/src/meta/processors/zoneMan/AddZoneProcessor.cpp b/src/meta/processors/zoneMan/AddZoneProcessor.cpp index 2bbb7fc4a..a71bc47b5 100644 --- a/src/meta/processors/zoneMan/AddZoneProcessor.cpp +++ b/src/meta/processors/zoneMan/AddZoneProcessor.cpp @@ -30,7 +30,16 @@ void AddZoneProcessor::process(const cpp2::AddZoneReq& req) { return; } - auto activeHosts = ActiveHostsMan::getActiveHosts(kvstore_); + auto activeHostsRet = ActiveHostsMan::getActiveHosts(kvstore_); + if (!nebula::ok(activeHostsRet)) { + auto retCode = nebula::error(activeHostsRet); + LOG(ERROR) << "Create zone failed, error: 
" << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } + auto activeHosts = nebula::value(activeHostsRet); + std::sort(activeHosts.begin(), activeHosts.end()); if (!std::includes(activeHosts.begin(), activeHosts.end(), nodeSet.begin(), nodeSet.end())) { LOG(ERROR) << "Host not exist"; @@ -40,16 +49,26 @@ void AddZoneProcessor::process(const cpp2::AddZoneReq& req) { } auto zoneIdRet = getZoneId(zoneName); - if (zoneIdRet.ok()) { + if (nebula::ok(zoneIdRet)) { LOG(ERROR) << "Zone " << zoneName << " already existed"; handleErrorCode(cpp2::ErrorCode::E_EXISTED); onFinished(); return; + } else { + auto retCode = nebula::error(zoneIdRet); + if (retCode != cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Create zone failed, zone name " << zoneName << " error: " + << static_cast(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } } // Check the node is not include in another zone - if (!checkHostNotOverlap(nodes)) { - handleErrorCode(cpp2::ErrorCode::E_INVALID_PARM); + auto retCode = checkHostNotOverlap(nodes); + if (retCode != cpp2::ErrorCode::SUCCEEDED) { + handleErrorCode(retCode); onFinished(); return; } @@ -73,14 +92,15 @@ void AddZoneProcessor::process(const cpp2::AddZoneReq& req) { doSyncPutAndUpdate(std::move(data)); } -bool AddZoneProcessor::checkHostNotOverlap(const std::vector& nodes) { - auto prefix = MetaServiceUtils::zonePrefix(); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "Get zones failed"; - return false; +cpp2::ErrorCode AddZoneProcessor::checkHostNotOverlap(const std::vector& nodes) { + const auto& prefix = MetaServiceUtils::zonePrefix(); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "Get zones failed, error: " << static_cast(retCode); + return retCode; } + auto iter = nebula::value(iterRet).get(); while 
(iter->valid()) { auto zoneName = MetaServiceUtils::parseZoneName(iter->key()); @@ -89,12 +109,12 @@ bool AddZoneProcessor::checkHostNotOverlap(const std::vector& nodes) { auto hostIter = std::find(hosts.begin(), hosts.end(), node); if (hostIter != hosts.end()) { LOG(ERROR) << "Host overlap found in zone " << zoneName; - return false; + return cpp2::ErrorCode::E_INVALID_PARM; } } iter->next(); } - return true; + return cpp2::ErrorCode::SUCCEEDED; } } // namespace meta diff --git a/src/meta/processors/zoneMan/AddZoneProcessor.h b/src/meta/processors/zoneMan/AddZoneProcessor.h index 439eabac7..9329e1176 100644 --- a/src/meta/processors/zoneMan/AddZoneProcessor.h +++ b/src/meta/processors/zoneMan/AddZoneProcessor.h @@ -24,7 +24,7 @@ class AddZoneProcessor : public BaseProcessor { explicit AddZoneProcessor(kvstore::KVStore* kvstore) : BaseProcessor(kvstore) {} - bool checkHostNotOverlap(const std::vector& nodes); + cpp2::ErrorCode checkHostNotOverlap(const std::vector& nodes); }; } // namespace meta diff --git a/src/meta/processors/zoneMan/DropGroupProcessor.cpp b/src/meta/processors/zoneMan/DropGroupProcessor.cpp index a0eb0111f..11b6fbf09 100644 --- a/src/meta/processors/zoneMan/DropGroupProcessor.cpp +++ b/src/meta/processors/zoneMan/DropGroupProcessor.cpp @@ -13,15 +13,23 @@ void DropGroupProcessor::process(const cpp2::DropGroupReq& req) { folly::SharedMutex::WriteHolder wHolder(LockUtils::groupLock()); auto groupName = req.get_group_name(); auto groupIdRet = getGroupId(groupName); - if (!groupIdRet.ok()) { - LOG(ERROR) << "Group: " << groupName << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(groupIdRet)) { + auto retCode = nebula::error(groupIdRet); + if (retCode == cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Drop Group Failed, Group " << groupName << " not found."; + } else { + LOG(ERROR) << "Drop Group Failed, error: " << static_cast(retCode); + } + handleErrorCode(retCode); onFinished(); return; } // If any space rely on 
this group, it should not be droped. - if (!checkSpaceDependency(groupName)) { + auto retCode = checkSpaceDependency(groupName); + if (retCode != cpp2::ErrorCode::SUCCEEDED) { + handleErrorCode(retCode); + onFinished(); return; } @@ -32,16 +40,15 @@ void DropGroupProcessor::process(const cpp2::DropGroupReq& req) { doSyncMultiRemoveAndUpdate(std::move(keys)); } -bool DropGroupProcessor::checkSpaceDependency(const std::string& groupName) { - auto prefix = MetaServiceUtils::spacePrefix(); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "List spaces failed"; - handleErrorCode(MetaCommon::to(ret)); - onFinished(); - return false; +cpp2::ErrorCode DropGroupProcessor::checkSpaceDependency(const std::string& groupName) { + const auto& prefix = MetaServiceUtils::spacePrefix(); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "List spaces failed, error: " << static_cast(retCode); + return retCode; } + auto iter = nebula::value(iterRet).get(); while (iter->valid()) { auto properties = MetaServiceUtils::parseSpace(iter->val()); @@ -49,13 +56,12 @@ bool DropGroupProcessor::checkSpaceDependency(const std::string& groupName) { *properties.group_name_ref() == groupName) { LOG(ERROR) << "Space " << properties.get_space_name() << " is bind to the group " << groupName; - handleErrorCode(cpp2::ErrorCode::E_NOT_DROP); - onFinished(); - return false; + return cpp2::ErrorCode::E_NOT_DROP; } iter->next(); } - return true; + return cpp2::ErrorCode::SUCCEEDED; } + } // namespace meta } // namespace nebula diff --git a/src/meta/processors/zoneMan/DropGroupProcessor.h b/src/meta/processors/zoneMan/DropGroupProcessor.h index a369337f1..6f2b1a77d 100644 --- a/src/meta/processors/zoneMan/DropGroupProcessor.h +++ b/src/meta/processors/zoneMan/DropGroupProcessor.h @@ -24,7 +24,7 @@ class DropGroupProcessor : 
public BaseProcessor { explicit DropGroupProcessor(kvstore::KVStore* kvstore) : BaseProcessor(kvstore) {} - bool checkSpaceDependency(const std::string& groupName); + cpp2::ErrorCode checkSpaceDependency(const std::string& groupName); }; } // namespace meta diff --git a/src/meta/processors/zoneMan/DropZoneProcessor.cpp b/src/meta/processors/zoneMan/DropZoneProcessor.cpp index bf85a0450..38f57ab43 100644 --- a/src/meta/processors/zoneMan/DropZoneProcessor.cpp +++ b/src/meta/processors/zoneMan/DropZoneProcessor.cpp @@ -13,15 +13,23 @@ void DropZoneProcessor::process(const cpp2::DropZoneReq& req) { folly::SharedMutex::WriteHolder wHolder(LockUtils::zoneLock()); auto zoneName = req.get_zone_name(); auto zoneIdRet = getZoneId(zoneName); - if (!zoneIdRet.ok()) { - LOG(ERROR) << "Zone " << zoneName << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(zoneIdRet)) { + auto retCode = nebula::error(zoneIdRet); + if (retCode == cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Drop Zone Failed, Zone " << zoneName << " not found."; + } else { + LOG(ERROR) << "Drop Zone Failed, error: " << static_cast(retCode); + } + handleErrorCode(retCode); onFinished(); return; } // If zone belong to any group, it should not be droped. 
- if (!checkGroupDependency(zoneName)) { + auto retCode = checkGroupDependency(zoneName); + if (retCode != cpp2::ErrorCode::SUCCEEDED) { + handleErrorCode(retCode); + onFinished(); return; } @@ -32,16 +40,15 @@ void DropZoneProcessor::process(const cpp2::DropZoneReq& req) { doSyncMultiRemoveAndUpdate(std::move(keys)); } -bool DropZoneProcessor::checkGroupDependency(const std::string& zoneName) { - auto prefix = MetaServiceUtils::groupPrefix(); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - LOG(ERROR) << "List zones failed"; - handleErrorCode(MetaCommon::to(ret)); - onFinished(); - return false; +cpp2::ErrorCode DropZoneProcessor::checkGroupDependency(const std::string& zoneName) { + const auto& prefix = MetaServiceUtils::groupPrefix(); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "List zones failed, error: " << static_cast(retCode); + return retCode; } + auto iter = nebula::value(iterRet).get(); while (iter->valid()) { auto zoneNames = MetaServiceUtils::parseZoneNames(iter->val()); @@ -49,13 +56,11 @@ bool DropZoneProcessor::checkGroupDependency(const std::string& zoneName) { if (zoneIter != zoneNames.end()) { auto groupName = MetaServiceUtils::parseGroupName(iter->key()); LOG(ERROR) << "Zone " << zoneName << " is belong to Group " << groupName; - handleErrorCode(cpp2::ErrorCode::E_NOT_DROP); - onFinished(); - return false; + return cpp2::ErrorCode::E_NOT_DROP; } iter->next(); } - return true; + return cpp2::ErrorCode::SUCCEEDED; } } // namespace meta diff --git a/src/meta/processors/zoneMan/DropZoneProcessor.h b/src/meta/processors/zoneMan/DropZoneProcessor.h index b096a19f4..a7533a94d 100644 --- a/src/meta/processors/zoneMan/DropZoneProcessor.h +++ b/src/meta/processors/zoneMan/DropZoneProcessor.h @@ -24,7 +24,7 @@ class DropZoneProcessor : public BaseProcessor { explicit 
DropZoneProcessor(kvstore::KVStore* kvstore) : BaseProcessor(kvstore) {} - bool checkGroupDependency(const std::string& zoneName); + cpp2::ErrorCode checkGroupDependency(const std::string& zoneName); }; } // namespace meta diff --git a/src/meta/processors/zoneMan/GetGroupProcessor.cpp b/src/meta/processors/zoneMan/GetGroupProcessor.cpp index 4614438eb..be8b9ea91 100644 --- a/src/meta/processors/zoneMan/GetGroupProcessor.cpp +++ b/src/meta/processors/zoneMan/GetGroupProcessor.cpp @@ -11,25 +11,32 @@ namespace meta { void GetGroupProcessor::process(const cpp2::GetGroupReq& req) { folly::SharedMutex::ReadHolder rHolder(LockUtils::groupLock()); - auto groupName = req.get_group_name(); + auto& groupName = req.get_group_name(); auto groupIdRet = getGroupId(groupName); - if (!groupIdRet.ok()) { - LOG(ERROR) << "Group: " << groupName << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(groupIdRet)) { + auto retCode = nebula::error(groupIdRet); + if (retCode == cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Get Group Failed, Group " << groupName << " not found."; + } else { + LOG(ERROR) << "Get Group Failed, error: " << static_cast(retCode); + } + handleErrorCode(retCode); onFinished(); return; } auto groupKey = MetaServiceUtils::groupKey(groupName); auto groupValueRet = doGet(std::move(groupKey)); - if (!groupValueRet.ok()) { - LOG(ERROR) << "Get group " << groupName << " failed"; - handleErrorCode(cpp2::ErrorCode::E_STORE_FAILURE); + if (!nebula::ok(groupValueRet)) { + auto retCode = nebula::error(groupValueRet); + LOG(ERROR) << "Get group " << groupName << " failed, error " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - auto zoneNames = MetaServiceUtils::parseZoneNames(std::move(groupValueRet).value()); + auto zoneNames = MetaServiceUtils::parseZoneNames(std::move(nebula::value(groupValueRet))); LOG(INFO) << "Get Group: " << groupName << " zone size: " << zoneNames.size(); 
resp_.set_zone_names(std::move(zoneNames)); handleErrorCode(cpp2::ErrorCode::SUCCEEDED); diff --git a/src/meta/processors/zoneMan/GetZoneProcessor.cpp b/src/meta/processors/zoneMan/GetZoneProcessor.cpp index 4163f9c9e..75136364a 100644 --- a/src/meta/processors/zoneMan/GetZoneProcessor.cpp +++ b/src/meta/processors/zoneMan/GetZoneProcessor.cpp @@ -13,22 +13,30 @@ void GetZoneProcessor::process(const cpp2::GetZoneReq& req) { folly::SharedMutex::ReadHolder rHolder(LockUtils::zoneLock()); auto zoneName = req.get_zone_name(); auto zoneIdRet = getZoneId(zoneName); - if (!zoneIdRet.ok()) { - LOG(ERROR) << "Zone " << zoneName << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(zoneIdRet)) { + auto retCode = nebula::error(zoneIdRet); + if (retCode == cpp2::ErrorCode::E_NOT_FOUND) { + LOG(ERROR) << "Get Zone Failed, Zone " << zoneName << " not found."; + } else { + LOG(ERROR) << "Get Zone Failed, error: " << static_cast(retCode); + } + handleErrorCode(retCode); onFinished(); return; } auto zoneKey = MetaServiceUtils::zoneKey(zoneName); auto zoneValueRet = doGet(std::move(zoneKey)); - if (!zoneValueRet.ok()) { - LOG(ERROR) << "Get zone " << zoneName << " failed"; - handleErrorCode(cpp2::ErrorCode::E_STORE_FAILURE); + if (!nebula::ok(zoneValueRet)) { + auto retCode = nebula::error(zoneValueRet); + LOG(ERROR) << "Get zone " << zoneName << " failed, error: " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - auto hosts = MetaServiceUtils::parseZoneHosts(std::move(zoneValueRet).value()); + + auto hosts = MetaServiceUtils::parseZoneHosts(std::move(nebula::value(zoneValueRet))); LOG(INFO) << "Get Zone: " << zoneName << " node size: " << hosts.size(); resp_.set_hosts(std::move(hosts)); handleErrorCode(cpp2::ErrorCode::SUCCEEDED); diff --git a/src/meta/processors/zoneMan/ListGroupsProcessor.cpp b/src/meta/processors/zoneMan/ListGroupsProcessor.cpp index 5600af453..ceaf7ba17 100644 --- 
a/src/meta/processors/zoneMan/ListGroupsProcessor.cpp +++ b/src/meta/processors/zoneMan/ListGroupsProcessor.cpp @@ -11,14 +11,16 @@ namespace meta { void ListGroupsProcessor::process(const cpp2::ListGroupsReq&) { folly::SharedMutex::ReadHolder rHolder(LockUtils::groupLock()); - auto prefix = MetaServiceUtils::groupPrefix(); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - handleErrorCode(MetaCommon::to(ret)); + const auto& prefix = MetaServiceUtils::groupPrefix(); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "List groups failed, error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto iter = nebula::value(iterRet).get(); std::vector groups; while (iter->valid()) { diff --git a/src/meta/processors/zoneMan/ListZonesProcessor.cpp b/src/meta/processors/zoneMan/ListZonesProcessor.cpp index 4fed5eb3b..d1d466305 100644 --- a/src/meta/processors/zoneMan/ListZonesProcessor.cpp +++ b/src/meta/processors/zoneMan/ListZonesProcessor.cpp @@ -11,14 +11,16 @@ namespace meta { void ListZonesProcessor::process(const cpp2::ListZonesReq&) { folly::SharedMutex::ReadHolder rHolder(LockUtils::zoneLock()); - auto prefix = MetaServiceUtils::zonePrefix(); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != kvstore::ResultCode::SUCCEEDED) { - handleErrorCode(MetaCommon::to(ret)); + const auto& prefix = MetaServiceUtils::zonePrefix(); + auto iterRet = doPrefix(prefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "List zones failed, error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto iter = nebula::value(iterRet).get(); std::vector zones; while (iter->valid()) { diff --git a/src/meta/processors/zoneMan/UpdateGroupProcessor.cpp 
b/src/meta/processors/zoneMan/UpdateGroupProcessor.cpp index 75cc6bb27..cb974ec6b 100644 --- a/src/meta/processors/zoneMan/UpdateGroupProcessor.cpp +++ b/src/meta/processors/zoneMan/UpdateGroupProcessor.cpp @@ -13,24 +13,28 @@ void AddZoneIntoGroupProcessor::process(const cpp2::AddZoneIntoGroupReq& req) { folly::SharedMutex::ReadHolder rHolder(LockUtils::groupLock()); auto groupName = req.get_group_name(); auto groupIdRet = getGroupId(groupName); - if (!groupIdRet.ok()) { - LOG(ERROR) << "Group: " << groupName << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(groupIdRet)) { + auto retCode = nebula::error(groupIdRet); + LOG(ERROR) << "Get Group failed, group " << groupName + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } auto groupKey = MetaServiceUtils::groupKey(groupName); auto groupValueRet = doGet(std::move(groupKey)); - if (!groupValueRet.ok()) { - LOG(ERROR) << "Get group " << groupName << " failed"; - handleErrorCode(cpp2::ErrorCode::E_STORE_FAILURE); + if (!nebula::ok(groupValueRet)) { + auto retCode = nebula::error(groupValueRet); + LOG(ERROR) << "Get group " << groupName << " failed, error " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } auto zoneName = req.get_zone_name(); - auto zoneNames = MetaServiceUtils::parseZoneNames(std::move(groupValueRet).value()); + auto zoneNames = MetaServiceUtils::parseZoneNames(std::move(nebula::value(groupValueRet))); auto iter = std::find(zoneNames.begin(), zoneNames.end(), zoneName); if (iter != zoneNames.end()) { LOG(ERROR) << "Zone " << zoneName << " already exist in the group " << groupName; @@ -39,20 +43,23 @@ void AddZoneIntoGroupProcessor::process(const cpp2::AddZoneIntoGroupReq& req) { return; } - auto zonePrefix = MetaServiceUtils::zonePrefix(); - std::unique_ptr zoneIter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, zonePrefix, &zoneIter); - if (ret != kvstore::ResultCode::SUCCEEDED) { 
- handleErrorCode(MetaCommon::to(ret)); + const auto& zonePrefix = MetaServiceUtils::zonePrefix(); + auto iterRet = doPrefix(zonePrefix); + if (!nebula::ok(iterRet)) { + auto retCode = nebula::error(iterRet); + LOG(ERROR) << "Get zones failed, error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } + auto zoneIter = nebula::value(iterRet).get(); bool found = false; while (zoneIter->valid()) { auto name = MetaServiceUtils::parseZoneName(zoneIter->key()); if (name == zoneName) { found = true; + break; } zoneIter->next(); } @@ -75,24 +82,28 @@ void DropZoneFromGroupProcessor::process(const cpp2::DropZoneFromGroupReq& req) folly::SharedMutex::ReadHolder rHolder(LockUtils::groupLock()); auto groupName = req.get_group_name(); auto groupIdRet = getGroupId(groupName); - if (!groupIdRet.ok()) { - LOG(ERROR) << "Group: " << groupName << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(groupIdRet)) { + auto retCode = nebula::error(groupIdRet); + LOG(ERROR) << " Get Group " << groupName << " failed, error: " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } auto groupKey = MetaServiceUtils::groupKey(groupName); auto groupValueRet = doGet(groupKey); - if (!groupValueRet.ok()) { - LOG(ERROR) << "Get group " << groupName << " failed"; - handleErrorCode(cpp2::ErrorCode::E_STORE_FAILURE); + if (!nebula::ok(groupValueRet)) { + auto retCode = nebula::error(groupValueRet); + LOG(ERROR) << "Get group " << groupName << " failed, error: " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } auto zoneName = req.get_zone_name(); - auto zoneNames = MetaServiceUtils::parseZoneNames(std::move(groupValueRet).value()); + auto zoneNames = MetaServiceUtils::parseZoneNames(std::move(nebula::value(groupValueRet))); auto iter = std::find(zoneNames.begin(), zoneNames.end(), zoneName); if (iter == zoneNames.end()) { LOG(ERROR) << "Zone " << zoneName << " not exist in the group " << 
groupName; diff --git a/src/meta/processors/zoneMan/UpdateZoneProcessor.cpp b/src/meta/processors/zoneMan/UpdateZoneProcessor.cpp index 8fb23396e..c069d8253 100644 --- a/src/meta/processors/zoneMan/UpdateZoneProcessor.cpp +++ b/src/meta/processors/zoneMan/UpdateZoneProcessor.cpp @@ -15,23 +15,27 @@ void AddHostIntoZoneProcessor::process(const cpp2::AddHostIntoZoneReq& req) { folly::SharedMutex::ReadHolder rHolder(LockUtils::zoneLock()); auto zoneName = req.get_zone_name(); auto zoneIdRet = getZoneId(zoneName); - if (!zoneIdRet.ok()) { - LOG(ERROR) << "Zone " << zoneName << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(zoneIdRet)) { + auto retCode = nebula::error(zoneIdRet); + LOG(ERROR) << "Get Zone failed, zone " << zoneName + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } auto zoneKey = MetaServiceUtils::zoneKey(zoneName); auto zoneValueRet = doGet(std::move(zoneKey)); - if (!zoneValueRet.ok()) { - LOG(ERROR) << "Get zone " << zoneName << " failed"; - handleErrorCode(cpp2::ErrorCode::E_STORE_FAILURE); + if (!nebula::ok(zoneValueRet)) { + auto retCode = nebula::error(zoneValueRet); + LOG(ERROR) << "Get zone " << zoneName << " failed, error " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - auto hosts = MetaServiceUtils::parseZoneHosts(std::move(zoneValueRet).value()); + auto hosts = MetaServiceUtils::parseZoneHosts(std::move(nebula::value(zoneValueRet))); auto host = req.get_node(); auto iter = std::find(hosts.begin(), hosts.end(), host); if (iter != hosts.end()) { @@ -41,7 +45,16 @@ void AddHostIntoZoneProcessor::process(const cpp2::AddHostIntoZoneReq& req) { return; } - auto activeHosts = ActiveHostsMan::getActiveHosts(kvstore_); + auto activeHostsRet = ActiveHostsMan::getActiveHosts(kvstore_); + if (!nebula::ok(activeHostsRet)) { + auto retCode = nebula::error(activeHostsRet); + LOG(ERROR) << "Get hosts failed, error: " << static_cast(retCode); + 
handleErrorCode(retCode); + onFinished(); + return; + } + auto activeHosts = nebula::value(activeHostsRet); + auto found = std::find(activeHosts.begin(), activeHosts.end(), host); if (found == activeHosts.end()) { LOG(ERROR) << "Host " << host << " not exist"; @@ -61,23 +74,27 @@ void DropHostFromZoneProcessor::process(const cpp2::DropHostFromZoneReq& req) { folly::SharedMutex::ReadHolder rHolder(LockUtils::zoneLock()); auto zoneName = req.get_zone_name(); auto zoneIdRet = getZoneId(zoneName); - if (!zoneIdRet.ok()) { - LOG(ERROR) << "Zone " << zoneName << " not found"; - handleErrorCode(cpp2::ErrorCode::E_NOT_FOUND); + if (!nebula::ok(zoneIdRet)) { + auto retCode = nebula::error(zoneIdRet); + LOG(ERROR) << "Get Zone failed, group " << zoneName + << " error: " << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } auto zoneKey = MetaServiceUtils::zoneKey(zoneName); auto zoneValueRet = doGet(std::move(zoneKey)); - if (!zoneValueRet.ok()) { - LOG(ERROR) << "Get zone " << zoneName << " failed"; - handleErrorCode(cpp2::ErrorCode::E_STORE_FAILURE); + if (!nebula::ok(zoneValueRet)) { + auto retCode = nebula::error(zoneValueRet); + LOG(ERROR) << "Get zone " << zoneName << " failed, error: " + << static_cast(retCode); + handleErrorCode(retCode); onFinished(); return; } - auto hosts = MetaServiceUtils::parseZoneHosts(std::move(zoneValueRet).value()); + auto hosts = MetaServiceUtils::parseZoneHosts(std::move(nebula::value(zoneValueRet))); auto host = req.get_node(); auto iter = std::find(hosts.begin(), hosts.end(), host); if (iter == hosts.end()) { diff --git a/src/meta/test/ActiveHostsManTest.cpp b/src/meta/test/ActiveHostsManTest.cpp index a16188e11..c6780ea55 100644 --- a/src/meta/test/ActiveHostsManTest.cpp +++ b/src/meta/test/ActiveHostsManTest.cpp @@ -57,11 +57,16 @@ TEST(ActiveHostsManTest, NormalTest) { ActiveHostsMan::updateHostInfo(kv.get(), HostAddr("0", 0), info1); ActiveHostsMan::updateHostInfo(kv.get(), HostAddr("0", 1), info1); 
ActiveHostsMan::updateHostInfo(kv.get(), HostAddr("0", 2), info1); - ASSERT_EQ(3, ActiveHostsMan::getActiveHosts(kv.get()).size()); + auto hostsRet = ActiveHostsMan::getActiveHosts(kv.get()); + ASSERT_TRUE(nebula::ok(hostsRet)); + ASSERT_EQ(3, nebula::value(hostsRet).size()); HostInfo info2(now + 2000, cpp2::HostRole::STORAGE, gitInfoSha()); ActiveHostsMan::updateHostInfo(kv.get(), HostAddr("0", 0), info2); - ASSERT_EQ(3, ActiveHostsMan::getActiveHosts(kv.get()).size()); + hostsRet = ActiveHostsMan::getActiveHosts(kv.get()); + ASSERT_TRUE(nebula::ok(hostsRet)); + ASSERT_EQ(3, nebula::value(hostsRet).size()); + { const auto& prefix = MetaServiceUtils::hostPrefix(); std::unique_ptr iter; @@ -84,7 +89,9 @@ TEST(ActiveHostsManTest, NormalTest) { } sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); - ASSERT_EQ(1, ActiveHostsMan::getActiveHosts(kv.get()).size()); + hostsRet = ActiveHostsMan::getActiveHosts(kv.get()); + ASSERT_TRUE(nebula::ok(hostsRet)); + ASSERT_EQ(1, nebula::value(hostsRet).size()); } TEST(ActiveHostsManTest, LeaderTest) { @@ -98,12 +105,16 @@ TEST(ActiveHostsManTest, LeaderTest) { ActiveHostsMan::updateHostInfo(kv.get(), HostAddr("0", 0), hInfo1); ActiveHostsMan::updateHostInfo(kv.get(), HostAddr("0", 1), hInfo1); ActiveHostsMan::updateHostInfo(kv.get(), HostAddr("0", 2), hInfo1); - ASSERT_EQ(3, ActiveHostsMan::getActiveHosts(kv.get()).size()); + auto hostsRet = ActiveHostsMan::getActiveHosts(kv.get()); + ASSERT_TRUE(nebula::ok(hostsRet)); + ASSERT_EQ(3, nebula::value(hostsRet).size()); std::unordered_map> leaderIds; leaderIds.emplace(1, std::vector{1, 2, 3}); ActiveHostsMan::updateHostInfo(kv.get(), HostAddr("0", 0), hInfo2, &leaderIds); - ASSERT_EQ(3, ActiveHostsMan::getActiveHosts(kv.get()).size()); + hostsRet = ActiveHostsMan::getActiveHosts(kv.get()); + ASSERT_TRUE(nebula::ok(hostsRet)); + ASSERT_EQ(3, nebula::value(hostsRet).size()); { const auto& prefix = MetaServiceUtils::leaderPrefix(); std::unique_ptr iter; @@ -132,20 
+143,28 @@ TEST(ActiveHostsManTest, LeaderTest) { } sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); - ASSERT_EQ(1, ActiveHostsMan::getActiveHosts(kv.get()).size()); + hostsRet = ActiveHostsMan::getActiveHosts(kv.get()); + ASSERT_TRUE(nebula::ok(hostsRet)); + ASSERT_EQ(1, nebula::value(hostsRet).size()); } TEST(LastUpdateTimeManTest, NormalTest) { fs::TempDir rootPath("/tmp/LastUpdateTimeManTest.XXXXXX"); std::unique_ptr kv(MockCluster::initMetaKV(rootPath.path())); - ASSERT_EQ(0, LastUpdateTimeMan::get(kv.get())); + auto lastUpRet = LastUpdateTimeMan::get(kv.get()); + ASSERT_FALSE(nebula::ok(lastUpRet)); int64_t now = time::WallClock::fastNowInMilliSec(); LastUpdateTimeMan::update(kv.get(), now); - ASSERT_EQ(now, LastUpdateTimeMan::get(kv.get())); + lastUpRet = LastUpdateTimeMan::get(kv.get()); + ASSERT_TRUE(nebula::ok(lastUpRet)); + ASSERT_EQ(now, nebula::value(lastUpRet)); + LastUpdateTimeMan::update(kv.get(), now + 100); - ASSERT_EQ(now + 100, LastUpdateTimeMan::get(kv.get())); + lastUpRet = LastUpdateTimeMan::get(kv.get()); + ASSERT_TRUE(nebula::ok(lastUpRet)); + ASSERT_EQ(now + 100, nebula::value(lastUpRet)); LastUpdateTimeMan::update(kv.get(), now - 100); { diff --git a/src/meta/test/AdminClientTest.cpp b/src/meta/test/AdminClientTest.cpp index 8c4a34847..61e9cc460 100644 --- a/src/meta/test/AdminClientTest.cpp +++ b/src/meta/test/AdminClientTest.cpp @@ -291,8 +291,8 @@ TEST(AdminClientTest, RetryTest) { }); baton.wait(); auto peersRet = client->getPeers(0, 1); - CHECK(peersRet.ok()); - auto hosts = std::move(peersRet).value(); + CHECK(nebula::ok(peersRet)); + auto hosts = std::move(nebula::value(peersRet)); ASSERT_EQ(3, hosts.size()); ASSERT_EQ(Utils::getStoreAddrFromAdminAddr({localIp, rpcServer2->port_}), hosts[0]); ASSERT_EQ(Utils::getStoreAddrFromAdminAddr({localIp, rpcServer1->port_}), hosts[1]); @@ -316,7 +316,9 @@ TEST(AdminClientTest, SnapshotTest) { HostAddr storageHost = Utils::getStoreAddrFromAdminAddr(host); 
ActiveHostsMan::updateHostInfo(kv.get(), storageHost, HostInfo(now, meta::cpp2::HostRole::STORAGE, "")); - ASSERT_EQ(1, ActiveHostsMan::getActiveHosts(kv.get()).size()); + auto hostsRet = ActiveHostsMan::getActiveHosts(kv.get()); + ASSERT_TRUE(nebula::ok(hostsRet)); + ASSERT_EQ(1, nebula::value(hostsRet).size()); auto client = std::make_unique(kv.get()); { diff --git a/src/meta/test/BalancerTest.cpp b/src/meta/test/BalancerTest.cpp index 635dc354c..dffeae918 100644 --- a/src/meta/test/BalancerTest.cpp +++ b/src/meta/test/BalancerTest.cpp @@ -1300,8 +1300,8 @@ TEST(BalanceTest, StopPlanTest) { TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); auto stopRet = balancer.stop(); - CHECK(stopRet.ok()); - ASSERT_EQ(stopRet.value(), balanceId); + CHECK(nebula::ok(stopRet)); + ASSERT_EQ(nebula::value(stopRet), balanceId); // wait until the only IN_PROGRESS task finished; sleep(3); @@ -1448,14 +1448,14 @@ TEST(BalanceTest, SimpleLeaderBalancePlanTest) { LeaderBalancePlan plan; auto leaderBalanceResult = balancer.buildLeaderBalancePlan(&hostLeaderMap, 1, 3, false, plan, false); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); verifyLeaderBalancePlan(hostLeaderMap, plan, 3, 3); // check two plan build are same LeaderBalancePlan tempPlan; auto tempLeaderBalanceResult = balancer.buildLeaderBalancePlan(&tempMap, 1, 3, false, tempPlan, false); - ASSERT_TRUE(tempLeaderBalanceResult); + ASSERT_TRUE(nebula::ok(tempLeaderBalanceResult) && nebula::value(tempLeaderBalanceResult)); verifyLeaderBalancePlan(tempMap, tempPlan, 3, 3); EXPECT_EQ(plan.size(), tempPlan.size()); @@ -1472,7 +1472,7 @@ TEST(BalanceTest, SimpleLeaderBalancePlanTest) { LeaderBalancePlan plan; auto leaderBalanceResult = balancer.buildLeaderBalancePlan(&hostLeaderMap, 1, 3, false, plan, false); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); 
verifyLeaderBalancePlan(hostLeaderMap, plan, 3, 3); } { @@ -1484,7 +1484,7 @@ TEST(BalanceTest, SimpleLeaderBalancePlanTest) { LeaderBalancePlan plan; auto leaderBalanceResult = balancer.buildLeaderBalancePlan(&hostLeaderMap, 1, 3, false, plan, false); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); verifyLeaderBalancePlan(hostLeaderMap, plan, 3, 3); } { @@ -1496,7 +1496,7 @@ TEST(BalanceTest, SimpleLeaderBalancePlanTest) { LeaderBalancePlan plan; auto leaderBalanceResult = balancer.buildLeaderBalancePlan(&hostLeaderMap, 1, 3, false, plan, false); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); verifyLeaderBalancePlan(hostLeaderMap, plan, 3, 3); } } @@ -1529,7 +1529,7 @@ TEST(BalanceTest, IntersectHostsLeaderBalancePlanTest) { LeaderBalancePlan plan; auto leaderBalanceResult = balancer.buildLeaderBalancePlan(&hostLeaderMap, 1, 3, false, plan, false); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); verifyLeaderBalancePlan(hostLeaderMap, plan, 1, 2); } { @@ -1544,7 +1544,7 @@ TEST(BalanceTest, IntersectHostsLeaderBalancePlanTest) { LeaderBalancePlan plan; auto leaderBalanceResult = balancer.buildLeaderBalancePlan(&hostLeaderMap, 1, 3, false, plan, false); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); verifyLeaderBalancePlan(hostLeaderMap, plan, 1, 2); } { @@ -1559,7 +1559,7 @@ TEST(BalanceTest, IntersectHostsLeaderBalancePlanTest) { LeaderBalancePlan plan; auto leaderBalanceResult = balancer.buildLeaderBalancePlan(&hostLeaderMap, 1, 3, false, plan, false); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); verifyLeaderBalancePlan(hostLeaderMap, plan, 1, 2); } { @@ -1574,7 +1574,7 @@ TEST(BalanceTest, 
IntersectHostsLeaderBalancePlanTest) { LeaderBalancePlan plan; auto leaderBalanceResult = balancer.buildLeaderBalancePlan(&hostLeaderMap, 1, 3, false, plan, false); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); verifyLeaderBalancePlan(hostLeaderMap, plan, 1, 2); } { @@ -1589,7 +1589,7 @@ TEST(BalanceTest, IntersectHostsLeaderBalancePlanTest) { LeaderBalancePlan plan; auto leaderBalanceResult = balancer.buildLeaderBalancePlan(&hostLeaderMap, 1, 3, false, plan, false); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); verifyLeaderBalancePlan(hostLeaderMap, plan, 1, 2); } } @@ -1638,7 +1638,7 @@ TEST(BalanceTest, ManyHostsLeaderBalancePlanTest) { LeaderBalancePlan plan; auto leaderBalanceResult = balancer.buildLeaderBalancePlan(&hostLeaderMap, 1, 3, false, plan); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); verifyLeaderBalancePlan(hostLeaderMap, plan, minLoad, maxLoad); } } @@ -1727,7 +1727,7 @@ TEST(BalanceTest, LeaderBalanceWithZoneTest) { LeaderBalancePlan plan; auto leaderBalanceResult = balancer.buildLeaderBalancePlan(&hostLeaderMap, 1, 3, true, plan, true); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); verifyLeaderBalancePlan(hostLeaderMap, plan, 1, 2); } { @@ -1742,7 +1742,7 @@ TEST(BalanceTest, LeaderBalanceWithZoneTest) { LeaderBalancePlan plan; auto leaderBalanceResult = balancer.buildLeaderBalancePlan(&hostLeaderMap, 1, 3, true, plan, true); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); verifyLeaderBalancePlan(hostLeaderMap, plan, 1, 2); } } @@ -1810,7 +1810,7 @@ TEST(BalanceTest, LeaderBalanceWithLargerZoneTest) { LeaderBalancePlan plan; auto leaderBalanceResult = 
balancer.buildLeaderBalancePlan(&hostLeaderMap, 1, 3, true, plan, true); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); verifyLeaderBalancePlan(hostLeaderMap, plan, 0, 1); } } @@ -1937,7 +1937,7 @@ TEST(BalanceTest, LeaderBalanceWithComplexZoneTest) { LeaderBalancePlan plan; auto leaderBalanceResult = balancer.buildLeaderBalancePlan(&hostLeaderMap, 3, 3, true, plan, true); - ASSERT_TRUE(leaderBalanceResult); + ASSERT_TRUE(nebula::ok(leaderBalanceResult) && nebula::value(leaderBalanceResult)); verifyLeaderBalancePlan(hostLeaderMap, plan, 1, 9); } } diff --git a/src/meta/test/GetStatisTest.cpp b/src/meta/test/GetStatisTest.cpp index 95076bc7c..36dd4cf0c 100644 --- a/src/meta/test/GetStatisTest.cpp +++ b/src/meta/test/GetStatisTest.cpp @@ -114,15 +114,16 @@ TEST_F(GetStatisTest, StatisJob) { NiceMock adminClient; jobMgr->adminClient_ = &adminClient; auto rc = jobMgr->save(statisJob.jobKey(), statisJob.jobVal()); - ASSERT_EQ(rc, nebula::kvstore::ResultCode::SUCCEEDED); + ASSERT_EQ(rc, cpp2::ErrorCode::SUCCEEDED); { // Job is not executed, job status is QUEUE. // Statis data does not exist. 
- auto job1 = JobDescription::loadJobDescription(statisJob.id_, kv_.get()); - ASSERT_TRUE(job1); - ASSERT_EQ(statisJob.id_, job1.value().id_); - ASSERT_EQ(cpp2::JobStatus::QUEUE, job1.value().status_); + auto job1Ret = JobDescription::loadJobDescription(statisJob.id_, kv_.get()); + ASSERT_TRUE(nebula::ok(job1Ret)); + auto job1 = nebula::value(job1Ret); + ASSERT_EQ(statisJob.id_, job1.id_); + ASSERT_EQ(cpp2::JobStatus::QUEUE, job1.status_); cpp2::GetStatisReq req; req.set_space_id(spaceId); @@ -138,10 +139,10 @@ TEST_F(GetStatisTest, StatisJob) { auto ret = kv_->get(kDefaultSpaceId, kDefaultPartId, key, &val); ASSERT_NE(kvstore::ResultCode::SUCCEEDED, ret); - auto res = job1->setStatus(cpp2::JobStatus::RUNNING); + auto res = job1.setStatus(cpp2::JobStatus::RUNNING); ASSERT_TRUE(res); - auto retsav = jobMgr->save(job1->jobKey(), job1->jobVal()); - ASSERT_EQ(retsav, nebula::kvstore::ResultCode::SUCCEEDED); + auto retsav = jobMgr->save(job1.jobKey(), job1.jobVal()); + ASSERT_EQ(retsav, cpp2::ErrorCode::SUCCEEDED); } // Run statis job, job finished. 
@@ -160,10 +161,11 @@ TEST_F(GetStatisTest, StatisJob) { copyData(kv_.get(), 0, 0, statisKey, tempKey); jobMgr->jobFinished(jobId, cpp2::JobStatus::FINISHED); { - auto job2 = JobDescription::loadJobDescription(statisJob.id_, kv_.get()); - ASSERT_TRUE(job2); - ASSERT_EQ(statisJob.id_, job2.value().id_); - ASSERT_EQ(cpp2::JobStatus::FINISHED, job2.value().status_); + auto job2Ret = JobDescription::loadJobDescription(statisJob.id_, kv_.get()); + ASSERT_TRUE(nebula::ok(job2Ret)); + auto job2 = nebula::value(job2Ret); + ASSERT_EQ(statisJob.id_, job2.id_); + ASSERT_EQ(cpp2::JobStatus::FINISHED, job2.status_); cpp2::GetStatisReq req; req.set_space_id(spaceId); @@ -201,14 +203,15 @@ TEST_F(GetStatisTest, StatisJob) { std::vector paras1{"test_space"}; JobDescription statisJob2(13, cpp2::AdminCmd::STATS, paras1); auto rc2 = jobMgr->save(statisJob2.jobKey(), statisJob2.jobVal()); - ASSERT_EQ(rc2, nebula::kvstore::ResultCode::SUCCEEDED); + ASSERT_EQ(rc2, cpp2::ErrorCode::SUCCEEDED); { // Job is not executed, job status is QUEUE. // Statis data exists, but it is the result of the last statis job execution. - auto job1 = JobDescription::loadJobDescription(statisJob2.id_, kv_.get()); - ASSERT_TRUE(job1); - ASSERT_EQ(statisJob2.id_, job1.value().id_); - ASSERT_EQ(cpp2::JobStatus::QUEUE, job1.value().status_); + auto job1Ret = JobDescription::loadJobDescription(statisJob2.id_, kv_.get()); + ASSERT_TRUE(nebula::ok(job1Ret)); + auto job1 = nebula::value(job1Ret); + ASSERT_EQ(statisJob2.id_, job1.id_); + ASSERT_EQ(cpp2::JobStatus::QUEUE, job1.status_); // Success, but statis data is the result of the last statis job. 
cpp2::GetStatisReq req; @@ -239,24 +242,27 @@ TEST_F(GetStatisTest, StatisJob) { ASSERT_EQ(0, statisItem1.get_space_vertices()); ASSERT_EQ(0, statisItem1.get_space_edges()); - auto res = job1->setStatus(cpp2::JobStatus::RUNNING); + auto res = job1.setStatus(cpp2::JobStatus::RUNNING); ASSERT_TRUE(res); - auto retsav = jobMgr->save(job1->jobKey(), job1->jobVal()); - ASSERT_EQ(retsav, nebula::kvstore::ResultCode::SUCCEEDED); + auto retsav = jobMgr->save(job1.jobKey(), job1.jobVal()); + ASSERT_EQ(retsav, cpp2::ErrorCode::SUCCEEDED); } // Remove statis data. { auto key = MetaServiceUtils::statisKey(spaceId); folly::Baton baton; + auto retCode = nebula::kvstore::ResultCode::SUCCEEDED; kv_->asyncRemove(kDefaultSpaceId, kDefaultPartId, key, [&](nebula::kvstore::ResultCode code) { if (code != kvstore::ResultCode::SUCCEEDED) { + retCode = code; LOG(ERROR) << "kvstore asyncRemove failed: " << code; } baton.post(); }); baton.wait(); + ASSERT_EQ(nebula::kvstore::ResultCode::SUCCEEDED, retCode); // Directly find statis data in kvstore, statis data does not exist. 
std::string val; @@ -291,10 +297,11 @@ TEST_F(GetStatisTest, StatisJob) { jobMgr->save(statisJob2.jobKey(), statisJob2.jobVal()); { - auto job2 = JobDescription::loadJobDescription(statisJob2.id_, kv_.get()); - ASSERT_TRUE(job2); - ASSERT_EQ(statisJob2.id_, job2.value().id_); - ASSERT_EQ(cpp2::JobStatus::FINISHED, job2.value().status_); + auto job2Ret = JobDescription::loadJobDescription(statisJob2.id_, kv_.get()); + ASSERT_TRUE(nebula::ok(job2Ret)); + auto job2 = nebula::value(job2Ret); + ASSERT_EQ(statisJob2.id_, job2.id_); + ASSERT_EQ(cpp2::JobStatus::FINISHED, job2.status_); cpp2::GetStatisReq req; req.set_space_id(spaceId); @@ -339,7 +346,7 @@ TEST_F(GetStatisTest, MockSingleMachineTest) { entry.first, HostInfo(now, cpp2::HostRole::STORAGE, gitInfoSha()), &entry.second); - CHECK_EQ(ret, kvstore::ResultCode::SUCCEEDED); + ASSERT_EQ(ret, cpp2::ErrorCode::SUCCEEDED); } NiceMock adminClient; @@ -362,10 +369,11 @@ TEST_F(GetStatisTest, MockSingleMachineTest) { // check job result { sleep(1); - auto desc = JobDescription::loadJobDescription(job1.id_, kv_.get()); - ASSERT_TRUE(desc); - ASSERT_EQ(job1.id_, desc.value().id_); - ASSERT_EQ(cpp2::JobStatus::FINISHED, desc.value().status_); + auto descRet = JobDescription::loadJobDescription(job1.id_, kv_.get()); + ASSERT_TRUE(nebula::ok(descRet)); + auto desc = nebula::value(descRet); + ASSERT_EQ(job1.id_, desc.id_); + ASSERT_EQ(cpp2::JobStatus::FINISHED, desc.status_); cpp2::GetStatisReq req; req.set_space_id(spaceId); @@ -401,10 +409,11 @@ TEST_F(GetStatisTest, MockSingleMachineTest) { // check job result { sleep(1); - auto desc = JobDescription::loadJobDescription(job2.id_, kv_.get()); - ASSERT_TRUE(desc); - ASSERT_EQ(job2.id_, desc.value().id_); - ASSERT_EQ(cpp2::JobStatus::FINISHED, desc.value().status_); + auto descRet = JobDescription::loadJobDescription(job2.id_, kv_.get()); + ASSERT_TRUE(nebula::ok(descRet)); + auto desc = nebula::value(descRet); + ASSERT_EQ(job2.id_, desc.id_); + 
ASSERT_EQ(cpp2::JobStatus::FINISHED, desc.status_); cpp2::GetStatisReq req; req.set_space_id(spaceId); @@ -450,7 +459,7 @@ TEST_F(GetStatisTest, MockMultiMachineTest) { entry.first, HostInfo(now, cpp2::HostRole::STORAGE, gitInfoSha()), &entry.second); - CHECK_EQ(ret, kvstore::ResultCode::SUCCEEDED); + ASSERT_EQ(cpp2::ErrorCode::SUCCEEDED, ret); } NiceMock adminClient; @@ -475,10 +484,11 @@ TEST_F(GetStatisTest, MockMultiMachineTest) { // check job result { sleep(1); - auto desc = JobDescription::loadJobDescription(job.id_, kv_.get()); - ASSERT_TRUE(desc); - ASSERT_EQ(job.id_, desc.value().id_); - ASSERT_EQ(cpp2::JobStatus::FINISHED, desc.value().status_); + auto descRet = JobDescription::loadJobDescription(job.id_, kv_.get()); + ASSERT_TRUE(nebula::ok(descRet)); + auto desc = nebula::value(descRet); + ASSERT_EQ(job.id_, desc.id_); + ASSERT_EQ(cpp2::JobStatus::FINISHED, desc.status_); cpp2::GetStatisReq req; req.set_space_id(spaceId); diff --git a/src/meta/test/GroupZoneTest.cpp b/src/meta/test/GroupZoneTest.cpp index 026c81a1f..7ba29ff74 100644 --- a/src/meta/test/GroupZoneTest.cpp +++ b/src/meta/test/GroupZoneTest.cpp @@ -302,7 +302,7 @@ TEST(GroupAndZoneTest, GroupAndZoneTest) { auto f = processor->getFuture(); processor->process(req); auto resp = std::move(f).get(); - ASSERT_EQ(cpp2::ErrorCode::E_INVALID_PARM, resp.get_code()); + ASSERT_EQ(cpp2::ErrorCode::E_EXISTED, resp.get_code()); } // Group already existed although the order is different { @@ -314,7 +314,7 @@ TEST(GroupAndZoneTest, GroupAndZoneTest) { auto f = processor->getFuture(); processor->process(req); auto resp = std::move(f).get(); - ASSERT_EQ(cpp2::ErrorCode::E_INVALID_PARM, resp.get_code()); + ASSERT_EQ(cpp2::ErrorCode::E_EXISTED, resp.get_code()); } // Add Group with empty zone name list { diff --git a/src/meta/test/HBProcessorTest.cpp b/src/meta/test/HBProcessorTest.cpp index b4213ce0b..3ab132876 100644 --- a/src/meta/test/HBProcessorTest.cpp +++ b/src/meta/test/HBProcessorTest.cpp @@ -32,10 
+32,14 @@ TEST(HBProcessorTest, HBTest) { auto resp = std::move(f).get(); ASSERT_EQ(cpp2::ErrorCode::SUCCEEDED, resp.get_code()); } - auto hosts = ActiveHostsMan::getActiveHosts(kv.get(), 1); - ASSERT_EQ(5, hosts.size()); + + auto hostsRet = ActiveHostsMan::getActiveHosts(kv.get(), 1);; + ASSERT_TRUE(nebula::ok(hostsRet)); + ASSERT_EQ(5, nebula::value(hostsRet).size()); sleep(3); - ASSERT_EQ(0, ActiveHostsMan::getActiveHosts(kv.get(), 1).size()); + hostsRet = ActiveHostsMan::getActiveHosts(kv.get(), 1);; + ASSERT_TRUE(nebula::ok(hostsRet)); + ASSERT_EQ(0, nebula::value(hostsRet).size()); LOG(INFO) << "Test for invalid host!"; cpp2::HBReq req; diff --git a/src/meta/test/JobManagerTest.cpp b/src/meta/test/JobManagerTest.cpp index 14a4b556b..4c9b2a32d 100644 --- a/src/meta/test/JobManagerTest.cpp +++ b/src/meta/test/JobManagerTest.cpp @@ -122,10 +122,11 @@ TEST_F(JobManagerTest, StatisJob) { job.setStatus(cpp2::JobStatus::FINISHED); jobMgr->save(job.jobKey(), job.jobVal()); - auto job1 = JobDescription::loadJobDescription(job.id_, kv_.get()); - ASSERT_TRUE(job1); - ASSERT_EQ(job.id_, job1.value().id_); - ASSERT_EQ(cpp2::JobStatus::FINISHED, job1.value().status_); + auto job1Ret = JobDescription::loadJobDescription(job.id_, kv_.get()); + ASSERT_TRUE(nebula::ok(job1Ret)); + auto job1 = nebula::value(job1Ret); + ASSERT_EQ(job.id_, job1.id_); + ASSERT_EQ(cpp2::JobStatus::FINISHED, job1.status_); } TEST_F(JobManagerTest, JobPriority) { @@ -225,15 +226,16 @@ TEST_F(JobManagerTest, loadJobDescription) { ASSERT_EQ(job1.cmd_, cpp2::AdminCmd::COMPACT); ASSERT_EQ(job1.paras_[0], "test_space"); - auto optJd2 = JobDescription::loadJobDescription(job1.id_, kv_.get()); - ASSERT_TRUE(optJd2); - ASSERT_EQ(job1.id_, optJd2.value().id_); + auto optJd2Ret = JobDescription::loadJobDescription(job1.id_, kv_.get()); + ASSERT_TRUE(nebula::ok(optJd2Ret)); + auto optJd2 = nebula::value(optJd2Ret); + ASSERT_EQ(job1.id_, optJd2.id_); LOG(INFO) << "job1.id_ = " << job1.id_; - ASSERT_EQ(job1.cmd_, 
optJd2.value().cmd_); - ASSERT_EQ(job1.paras_, optJd2.value().paras_); - ASSERT_EQ(job1.status_, optJd2.value().status_); - ASSERT_EQ(job1.startTime_, optJd2.value().startTime_); - ASSERT_EQ(job1.stopTime_, optJd2.value().stopTime_); + ASSERT_EQ(job1.cmd_, optJd2.cmd_); + ASSERT_EQ(job1.paras_, optJd2.paras_); + ASSERT_EQ(job1.status_, optJd2.status_); + ASSERT_EQ(job1.startTime_, optJd2.startTime_); + ASSERT_EQ(job1.stopTime_, optJd2.stopTime_); } TEST(JobUtilTest, dummy) { @@ -361,8 +363,8 @@ TEST(JobDescriptionTest, ctor2) { std::string strKey = jd1.jobKey(); std::string strVal = jd1.jobVal(); - auto optJob = JobDescription::makeJobDescription(strKey, strVal); - ASSERT_NE(optJob, folly::none); + auto optJobRet = JobDescription::makeJobDescription(strKey, strVal); + ASSERT_TRUE(nebula::ok(optJobRet)); } TEST(JobDescriptionTest, ctor3) { @@ -376,8 +378,8 @@ TEST(JobDescriptionTest, ctor3) { std::string strVal = jd1.jobVal(); folly::StringPiece spKey(&strKey[0], strKey.length()); folly::StringPiece spVal(&strVal[0], strVal.length()); - auto optJob = JobDescription::makeJobDescription(spKey, spVal); - ASSERT_NE(optJob, folly::none); + auto optJobRet = JobDescription::makeJobDescription(spKey, spVal); + ASSERT_TRUE(nebula::ok(optJobRet)); } TEST(JobDescriptionTest, parseKey) { diff --git a/src/meta/test/MetaClientTest.cpp b/src/meta/test/MetaClientTest.cpp index 556037ecb..fcc7f6205 100644 --- a/src/meta/test/MetaClientTest.cpp +++ b/src/meta/test/MetaClientTest.cpp @@ -1763,7 +1763,9 @@ TEST(MetaClientTest, HeartbeatTest) { } } sleep(FLAGS_heartbeat_interval_secs + 1); - ASSERT_EQ(1, ActiveHostsMan::getActiveHosts(cluster.metaKV_.get()).size()); + auto hostsRet = ActiveHostsMan::getActiveHosts(cluster.metaKV_.get());; + ASSERT_TRUE(nebula::ok(hostsRet)); + ASSERT_EQ(1, nebula::value(hostsRet).size()); client->unRegisterListener(); } diff --git a/src/meta/test/ProcessorTest.cpp b/src/meta/test/ProcessorTest.cpp index 43192b5f6..c5be9b857 100644 --- 
a/src/meta/test/ProcessorTest.cpp +++ b/src/meta/test/ProcessorTest.cpp @@ -193,15 +193,15 @@ TEST(ProcessorTest, ListPartsTest) { LeaderParts leaderParts; leaderParts[1] = {1, 2, 3, 4, 5}; auto ret = ActiveHostsMan::updateHostInfo(kv.get(), {"0", 0}, info, &leaderParts); - CHECK_EQ(ret, kvstore::ResultCode::SUCCEEDED); + ASSERT_EQ(ret, cpp2::ErrorCode::SUCCEEDED); leaderParts[1] = {6, 7, 8}; ret = ActiveHostsMan::updateHostInfo(kv.get(), {"1", 1}, info, &leaderParts); - CHECK_EQ(ret, kvstore::ResultCode::SUCCEEDED); + ASSERT_EQ(ret, cpp2::ErrorCode::SUCCEEDED); leaderParts[1] = {9}; ret = ActiveHostsMan::updateHostInfo(kv.get(), {"2", 2}, info, &leaderParts); - CHECK_EQ(ret, kvstore::ResultCode::SUCCEEDED); + ASSERT_EQ(ret, cpp2::ErrorCode::SUCCEEDED); } { diff --git a/src/meta/test/TestUtils.h b/src/meta/test/TestUtils.h index ae7fe21f6..e359b2cfc 100644 --- a/src/meta/test/TestUtils.h +++ b/src/meta/test/TestUtils.h @@ -69,7 +69,7 @@ class TestUtils { auto now = time::WallClock::fastNowInMilliSec(); for (auto& h : hosts) { auto ret = ActiveHostsMan::updateHostInfo(kv, h, HostInfo(now, role, gitInfoSha)); - CHECK_EQ(ret, kvstore::ResultCode::SUCCEEDED); + ASSERT_EQ(cpp2::ErrorCode::SUCCEEDED, ret); } }