#include <algorithm>
#include <concepts>
#include <cstddef>
#include <gtest/gtest.h>
#include <iostream>
#include <string>
#include <vector>
import leetcode_test.web_crawler_multithreaded.HtmlParser;
import leetcode_test.web_crawler_multithreaded.Solution;

using namespace std;
using namespace leetcode_test::web_crawler_multithreaded;
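// HtmlParser and Solution come from the C++20 modules under test. Judging by
// how the tests below construct it, HtmlParser is assumed to be a test double
// built from a URL list plus (from, to) edge pairs, exposing the getUrls(url)
// interface from the LeetCode "Web Crawler Multithreaded" problem; the exact
// member signatures live in the imported modules.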
// A type with a size() member returning std::size_t.
template <class T>
concept sizable = requires(T& t) {
    { t.size() } -> std::same_as<std::size_t>;
};

// A type whose begin()/end() iterators can be advanced and compared.
template <class T>
concept iterable = requires(T& t) {
    ++t.begin();
    { t.begin() != t.end() } -> std::same_as<bool>;
};

// Dereferenced iterators of T and Y can be compared with ==.
template <class T, class Y>
concept equalable = requires(T& t, Y& y) {
    { *t.begin() == *y.begin() } -> std::same_as<bool>;
};
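// Asserts element-wise equality of two identically ordered containers,
// constrained by the concepts above.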
template <typename T, typename Y>
    requires sizable<T> and sizable<Y> and equalable<T, Y> and iterable<T> and iterable<Y>
void assertContentEquals(const T& left, const Y& right)
{
    ASSERT_EQ(left.size(), right.size());
    auto a = left.begin();
    auto b = right.begin();
    for (; a != left.end() && b != right.end(); ++a, ++b) {
        ASSERT_EQ(*a, *b);
    }
}
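// Note: a fatal ASSERT_* inside a helper only aborts the helper itself, not
// the calling test body. Callers with follow-up statements may want to wrap
// the call in ASSERT_NO_FATAL_FAILURE(assertContentEquals(res, Output)).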
TEST(web_crawler_multithreaded, main1)
{
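    // Page graph for the mock parser: an edge { a, b } is assumed to mean
    // urls[a] links to urls[b]. The crawl starts at http://news.google.com
    // (index 3); since the problem restricts the crawl to the start URL's
    // hostname and every outgoing link from index 3 targets news.yahoo.com
    // pages, only the start URL itself is expected in the result.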
    auto urls = vector<string> {
        "http://news.yahoo.com", "http://news.yahoo.com/news",
        "http://news.yahoo.com/news/topics/", "http://news.google.com"
    };
    auto edges = vector<pair<int, int>> { { 0, 2 }, { 2, 1 }, { 3, 2 }, { 3, 1 }, { 3, 0 } };
    auto startUrl = string { "http://news.google.com" };
    auto Output = vector<string> { "http://news.google.com" };

    auto htmlParser = HtmlParser { urls, edges };

    auto res = Solution().crawl(startUrl, htmlParser);
    cout << "expected:" << endl;
    for (auto& s : Output) {
        cout << s << endl;
    }
    cout << "actual:" << endl;
    for (auto& s : res) {
        cout << s << endl;
    }
    std::ranges::sort(res);
    std::ranges::sort(Output);
    assertContentEquals(res, Output);
}
TEST(web_crawler_multithreaded, main2)
{
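    // Here the crawl starts at http://news.yahoo.com/news/topics/ (index 2).
    // Assuming edge { a, b } means urls[a] links to urls[b], the reachable
    // same-hostname pages are indices 0, 1, and 4; http://news.google.com is
    // unreachable from index 2 and on a different host, so it is excluded.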
    auto urls = vector<string> { "http://news.yahoo.com", "http://news.yahoo.com/news",
        "http://news.yahoo.com/news/topics/",
        "http://news.google.com", "http://news.yahoo.com/us" };
    auto edges = vector<pair<int, int>> { { 2, 0 }, { 2, 1 }, { 3, 2 }, { 3, 1 }, { 0, 4 } };
    auto startUrl = string { "http://news.yahoo.com/news/topics/" };
    auto Output = vector<string> {
        "http://news.yahoo.com", "http://news.yahoo.com/news",
        "http://news.yahoo.com/news/topics/", "http://news.yahoo.com/us"
    };

    auto htmlParser = HtmlParser { urls, edges };

    auto res = Solution().crawl(startUrl, htmlParser);
    cout << "expected:" << endl;
    for (auto& s : Output) {
        cout << s << endl;
    }
    cout << "actual:" << endl;
    for (auto& s : res) {
        cout << s << endl;
    }
    std::ranges::sort(res);
    std::ranges::sort(Output);
    assertContentEquals(res, Output);
}
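// A hand-written main is only needed because this target links plain gtest;
// linking against gtest_main instead would provide an equivalent entry point.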
int main(int argc, char** argv)
{
    testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}