diff --git a/net/base/mime_sniffer.cc b/net/base/mime_sniffer.cc index 61ef948211059d..ad4212dc2811af 100644 --- a/net/base/mime_sniffer.cc +++ b/net/base/mime_sniffer.cc @@ -122,7 +122,8 @@ struct MagicNumber { template class VerifySizes { - COMPILE_ASSERT(MagicSize == MaskSize, sizes_must_be_equal); + static_assert(MagicSize == MaskSize, "sizes must be equal"); + public: enum { SIZES = MagicSize }; }; diff --git a/net/base/network_change_notifier.cc b/net/base/network_change_notifier.cc index 05c72a103b8a29..34ec592f8fa27c 100644 --- a/net/base/network_change_notifier.cc +++ b/net/base/network_change_notifier.cc @@ -565,10 +565,9 @@ const char* NetworkChangeNotifier::ConnectionTypeToString( "CONNECTION_NONE", "CONNECTION_BLUETOOTH" }; - COMPILE_ASSERT( - arraysize(kConnectionTypeNames) == - NetworkChangeNotifier::CONNECTION_LAST + 1, - ConnectionType_name_count_mismatch); + static_assert(arraysize(kConnectionTypeNames) == + NetworkChangeNotifier::CONNECTION_LAST + 1, + "ConnectionType name count should match"); if (type < CONNECTION_UNKNOWN || type > CONNECTION_LAST) { NOTREACHED(); return "CONNECTION_INVALID"; diff --git a/net/base/prioritized_dispatcher_unittest.cc b/net/base/prioritized_dispatcher_unittest.cc index 40c74257ddc9a7..44ffb57780c16b 100644 --- a/net/base/prioritized_dispatcher_unittest.cc +++ b/net/base/prioritized_dispatcher_unittest.cc @@ -19,12 +19,10 @@ namespace { // We rely on the priority enum values being sequential having starting at 0, // and increasing for higher priorities. -COMPILE_ASSERT(MINIMUM_PRIORITY == 0u && - MINIMUM_PRIORITY == IDLE && - IDLE < LOWEST && - LOWEST < HIGHEST && - HIGHEST <= MAXIMUM_PRIORITY, - priority_indexes_incompatible); +static_assert(MINIMUM_PRIORITY == 0u && MINIMUM_PRIORITY == IDLE && + IDLE < LOWEST && LOWEST < HIGHEST && + HIGHEST <= MAXIMUM_PRIORITY, + "priority indexes incompatible"); class PrioritizedDispatcherTest : public testing::Test { public: diff --git a/net/cert/crl_set_storage.cc b/net/cert/crl_set_storage.cc index 2da4f929be9606..f2332ab292b9b2 100644 --- a/net/cert/crl_set_storage.cc +++ b/net/cert/crl_set_storage.cc @@ -295,7 +295,7 @@ bool CRLSetStorage::Parse(base::StringPiece data, // anything by doing this. #if defined(__BYTE_ORDER) // Linux check - COMPILE_ASSERT(__BYTE_ORDER == __LITTLE_ENDIAN, assumes_little_endian); + static_assert(__BYTE_ORDER == __LITTLE_ENDIAN, "assumes little endian"); #elif defined(__BIG_ENDIAN__) // Mac check #error assumes little endian diff --git a/net/cookies/cookie_monster.cc b/net/cookies/cookie_monster.cc index d3186471372e8a..f6895537b8c689 100644 --- a/net/cookies/cookie_monster.cc +++ b/net/cookies/cookie_monster.cc @@ -1890,8 +1890,8 @@ void CookieMonster::InternalDeleteCookie(CookieMap::iterator it, // Ideally, this would be asserted up where we define ChangeCauseMapping, // but DeletionCause's visibility (or lack thereof) forces us to make // this check here. - COMPILE_ASSERT(arraysize(ChangeCauseMapping) == DELETE_COOKIE_LAST_ENTRY + 1, - ChangeCauseMapping_size_not_eq_DeletionCause_enum_size); + static_assert(arraysize(ChangeCauseMapping) == DELETE_COOKIE_LAST_ENTRY + 1, + "ChangeCauseMapping size should match DeletionCause size"); // See InitializeHistograms() for details. 
if (deletion_cause != DELETE_COOKIE_DONT_RECORD) diff --git a/net/disk_cache/blockfile/backend_impl.cc b/net/disk_cache/blockfile/backend_impl.cc index 1998d331d2a119..bf76842cc8655f 100644 --- a/net/disk_cache/blockfile/backend_impl.cc +++ b/net/disk_cache/blockfile/backend_impl.cc @@ -677,7 +677,8 @@ EntryImpl* BackendImpl::OpenNextEntryImpl(Rankings::Iterator* iterator) { } bool BackendImpl::SetMaxSize(int max_bytes) { - COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); + static_assert(sizeof(max_bytes) == sizeof(max_size_), + "unsupported int model"); if (max_bytes < 0) return false; diff --git a/net/disk_cache/blockfile/backend_impl_v3.cc b/net/disk_cache/blockfile/backend_impl_v3.cc index a1024b45409101..0b0e2326a6c44b 100644 --- a/net/disk_cache/blockfile/backend_impl_v3.cc +++ b/net/disk_cache/blockfile/backend_impl_v3.cc @@ -91,7 +91,8 @@ int BackendImplV3::Init(const CompletionCallback& callback) { // ------------------------------------------------------------------------ bool BackendImplV3::SetMaxSize(int max_bytes) { - COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); + static_assert(sizeof(max_bytes) == sizeof(max_size_), + "unsupported int model"); if (max_bytes < 0) return false; diff --git a/net/disk_cache/blockfile/block_files.cc b/net/disk_cache/blockfile/block_files.cc index 58d25da97900a6..525141b66c2a88 100644 --- a/net/disk_cache/blockfile/block_files.cc +++ b/net/disk_cache/blockfile/block_files.cc @@ -548,7 +548,7 @@ bool BlockFiles::GrowBlockFile(MappedFile* file, BlockFileHeader* header) { } MappedFile* BlockFiles::FileForNewBlock(FileType block_type, int block_count) { - COMPILE_ASSERT(RANKINGS == 1, invalid_file_type); + static_assert(RANKINGS == 1, "invalid file type"); MappedFile* file = block_files_[block_type - 1]; BlockHeader file_header(file); diff --git a/net/disk_cache/blockfile/disk_format.h b/net/disk_cache/blockfile/disk_format.h index 95ac58b6274caf..b2e666abb2be2d 100644 --- a/net/disk_cache/blockfile/disk_format.h +++ b/net/disk_cache/blockfile/disk_format.h @@ -116,7 +116,7 @@ struct EntryStore { char key[256 - 24 * 4]; // null terminated }; -COMPILE_ASSERT(sizeof(EntryStore) == 256, bad_EntyStore); +static_assert(sizeof(EntryStore) == 256, "bad EntryStore"); const int kMaxInternalKeyLength = 4 * sizeof(EntryStore) - offsetof(EntryStore, key) - 1; @@ -146,7 +146,7 @@ struct RankingsNode { }; #pragma pack(pop) -COMPILE_ASSERT(sizeof(RankingsNode) == 36, bad_RankingsNode); +static_assert(sizeof(RankingsNode) == 36, "bad RankingsNode"); } // namespace disk_cache diff --git a/net/disk_cache/blockfile/disk_format_base.h b/net/disk_cache/blockfile/disk_format_base.h index 9f4c16e4fd4318..1c49a48e737f7f 100644 --- a/net/disk_cache/blockfile/disk_format_base.h +++ b/net/disk_cache/blockfile/disk_format_base.h @@ -61,7 +61,7 @@ struct BlockFileHeader { AllocBitmap allocation_map; }; -COMPILE_ASSERT(sizeof(BlockFileHeader) == kBlockHeaderSize, bad_header); +static_assert(sizeof(BlockFileHeader) == kBlockHeaderSize, "bad header"); // Sparse data support: // We keep a two level hierarchy to enable sparse data for an entry: the first @@ -124,8 +124,8 @@ struct SparseData { // The number of blocks stored by a child entry. 
const int kNumSparseBits = 1024; -COMPILE_ASSERT(sizeof(SparseData) == sizeof(SparseHeader) + kNumSparseBits / 8, - Invalid_SparseData_bitmap); +static_assert(sizeof(SparseData) == sizeof(SparseHeader) + kNumSparseBits / 8, + "invalid SparseData bitmap"); } // namespace disk_cache diff --git a/net/disk_cache/blockfile/disk_format_v3.h b/net/disk_cache/blockfile/disk_format_v3.h index f5811cc0fc42c8..f16648b5bf1a45 100644 --- a/net/disk_cache/blockfile/disk_format_v3.h +++ b/net/disk_cache/blockfile/disk_format_v3.h @@ -97,7 +97,7 @@ struct IndexBitmap { IndexHeaderV3 header; uint32 bitmap[kBaseBitmapBytes / 4]; // First page of the bitmap. }; -COMPILE_ASSERT(sizeof(IndexBitmap) == 4096, bad_IndexHeader); +static_assert(sizeof(IndexBitmap) == 4096, "bad IndexHeader"); // Possible states for a given entry. enum EntryState { @@ -109,7 +109,7 @@ enum EntryState { ENTRY_FIXING, // Inconsistent state. The entry is being verified. ENTRY_USED // The slot is in use (entry is present). }; -COMPILE_ASSERT(ENTRY_USED <= 7, state_uses_3_bits); +static_assert(ENTRY_USED <= 7, "state uses 3 bits"); enum EntryGroup { ENTRY_NO_USE = 0, // The entry has not been reused. @@ -118,7 +118,7 @@ enum EntryGroup { ENTRY_RESERVED, // Reserved for future use. ENTRY_EVICTED // The entry was deleted. }; -COMPILE_ASSERT(ENTRY_USED <= 7, group_uses_3_bits); +static_assert(ENTRY_USED <= 7, "group uses 3 bits"); #pragma pack(push, 1) struct IndexCell { @@ -183,7 +183,7 @@ struct IndexCell { uint64 first_part; uint8 last_part; }; -COMPILE_ASSERT(sizeof(IndexCell) == 9, bad_IndexCell); +static_assert(sizeof(IndexCell) == 9, "bad IndexCell"); const int kCellsPerBucket = 4; struct IndexBucket { @@ -191,7 +191,7 @@ struct IndexBucket { int32 next; uint32 hash; // The high order byte is reserved (should be zero). }; -COMPILE_ASSERT(sizeof(IndexBucket) == 44, bad_IndexBucket); +static_assert(sizeof(IndexBucket) == 44, "bad IndexBucket"); const int kBytesPerCell = 44 / kCellsPerBucket; // The main cache index. Backed by a file named index_tb1. 
@@ -225,7 +225,7 @@ struct EntryRecord { int32 pad[3]; uint32 self_hash; }; -COMPILE_ASSERT(sizeof(EntryRecord) == 104, bad_EntryRecord); +static_assert(sizeof(EntryRecord) == 104, "bad EntryRecord"); struct ShortEntryRecord { uint32 hash; @@ -239,7 +239,7 @@ struct ShortEntryRecord { uint32 long_hash[5]; uint32 self_hash; }; -COMPILE_ASSERT(sizeof(ShortEntryRecord) == 48, bad_ShortEntryRecord); +static_assert(sizeof(ShortEntryRecord) == 48, "bad ShortEntryRecord"); } // namespace disk_cache diff --git a/net/disk_cache/blockfile/entry_impl.cc b/net/disk_cache/blockfile/entry_impl.cc index 0a882dec6e6aa5..76bf7c7bfbb0fc 100644 --- a/net/disk_cache/blockfile/entry_impl.cc +++ b/net/disk_cache/blockfile/entry_impl.cc @@ -776,7 +776,7 @@ std::string EntryImpl::GetKey() const { if (address.is_block_file()) offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; - COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index); + static_assert(kNumStreams == kKeyFileIndex, "invalid key index"); File* key_file = const_cast(this)->GetBackingFile(address, kKeyFileIndex); if (!key_file) diff --git a/net/disk_cache/blockfile/entry_impl_v3.cc b/net/disk_cache/blockfile/entry_impl_v3.cc index c8898e069d62fd..04e9ad97dd37af 100644 --- a/net/disk_cache/blockfile/entry_impl_v3.cc +++ b/net/disk_cache/blockfile/entry_impl_v3.cc @@ -489,7 +489,7 @@ std::string EntryImplV3::GetKey() const { if (address.is_block_file()) offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; - COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index); + static_assert(kNumStreams == kKeyFileIndex, "invalid key index"); File* key_file = const_cast(this)->GetBackingFile(address, kKeyFileIndex); if (!key_file) diff --git a/net/disk_cache/blockfile/file_win.cc b/net/disk_cache/blockfile/file_win.cc index bf313128b4631d..ef823c67502449 100644 --- a/net/disk_cache/blockfile/file_win.cc +++ b/net/disk_cache/blockfile/file_win.cc @@ -26,7 +26,8 @@ struct MyOverlapped { disk_cache::FileIOCallback* callback_; }; -COMPILE_ASSERT(!offsetof(MyOverlapped, context_), starts_with_overlapped); +static_assert(offsetof(MyOverlapped, context_) == 0, + "should start with overlapped"); // Helper class to handle the IO completion notifications from the message loop. class CompletionHandler : public base::MessageLoopForIO::IOHandler { diff --git a/net/disk_cache/blockfile/stats.cc b/net/disk_cache/blockfile/stats.cc index 70592e4bded3fb..fae006d7617600 100644 --- a/net/disk_cache/blockfile/stats.cc +++ b/net/disk_cache/blockfile/stats.cc @@ -24,7 +24,7 @@ struct OnDiskStats { int data_sizes[disk_cache::Stats::kDataSizesLength]; int64 counters[disk_cache::Stats::MAX_COUNTER]; }; -COMPILE_ASSERT(sizeof(OnDiskStats) < 512, needs_more_than_2_blocks); +static_assert(sizeof(OnDiskStats) < 512, "needs more than 2 blocks"); // Returns the "floor" (as opposed to "ceiling") of log base 2 of number. int LogBase2(int32 number) { @@ -67,8 +67,8 @@ static const char* kCounterNames[] = { "Doom recent entries", "unused" }; -COMPILE_ASSERT(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER, - update_the_names); +static_assert(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER, + "update the names"); } // namespace @@ -155,7 +155,7 @@ void Stats::InitSizeHistogram() { int Stats::StorageSize() { // If we have more than 512 bytes of counters, change kDiskSignature so we // don't overwrite something else (LoadStats must fail). 
- COMPILE_ASSERT(sizeof(OnDiskStats) <= 256 * 2, use_more_blocks); + static_assert(sizeof(OnDiskStats) <= 256 * 2, "use more blocks"); return 256 * 2; } @@ -300,7 +300,7 @@ int Stats::GetStatsBucket(int32 size) { // From this point on, use a logarithmic scale. int result = LogBase2(size) + 1; - COMPILE_ASSERT(kDataSizesLength > 16, update_the_scale); + static_assert(kDataSizesLength > 16, "update the scale"); if (result >= kDataSizesLength) result = kDataSizesLength - 1; diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc index 03cc86b9de9f66..228c654221e85c 100644 --- a/net/disk_cache/entry_unittest.cc +++ b/net/disk_cache/entry_unittest.cc @@ -670,7 +670,8 @@ void DiskCacheEntryTest::StreamAccess() { ASSERT_TRUE(NULL != entry); const int kReadBufferSize = 600; const int kFinalReadSize = kBufferSize - kReadBufferSize; - COMPILE_ASSERT(kFinalReadSize < kReadBufferSize, should_be_exactly_two_reads); + static_assert(kFinalReadSize < kReadBufferSize, + "should be exactly two reads"); scoped_refptr buffer2(new net::IOBuffer(kReadBufferSize)); for (int i = 0; i < kNumStreams; i++) { memset(buffer2->data(), 0, kReadBufferSize); diff --git a/net/disk_cache/memory/mem_backend_impl.cc b/net/disk_cache/memory/mem_backend_impl.cc index cc33a002c7f558..8350963c6e39be 100644 --- a/net/disk_cache/memory/mem_backend_impl.cc +++ b/net/disk_cache/memory/mem_backend_impl.cc @@ -76,7 +76,8 @@ bool MemBackendImpl::Init() { } bool MemBackendImpl::SetMaxSize(int max_bytes) { - COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); + static_assert(sizeof(max_bytes) == sizeof(max_size_), + "unsupported int model"); if (max_bytes < 0) return false; diff --git a/net/disk_cache/simple/simple_entry_impl.cc b/net/disk_cache/simple/simple_entry_impl.cc index a894aff09ca1be..f8f1b8566655cd 100644 --- a/net/disk_cache/simple/simple_entry_impl.cc +++ b/net/disk_cache/simple/simple_entry_impl.cc @@ -185,14 +185,14 @@ SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type, net_log_(net::BoundNetLog::Make( net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)), stream_0_data_(new net::GrowableIOBuffer()) { - COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_), - arrays_should_be_same_size); - COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_), - arrays_should_be_same_size); - COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_), - arrays_should_be_same_size); - COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_), - arrays_should_be_same_size); + static_assert(arraysize(data_size_) == arraysize(crc32s_end_offset_), + "arrays should be the same size"); + static_assert(arraysize(data_size_) == arraysize(crc32s_), + "arrays should be the same size"); + static_assert(arraysize(data_size_) == arraysize(have_written_), + "arrays should be the same size"); + static_assert(arraysize(data_size_) == arraysize(crc_check_state_), + "arrays should be the same size"); MakeUninitialized(); net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY, CreateNetLogSimpleEntryConstructionCallback(this)); diff --git a/net/disk_cache/simple/simple_index.h b/net/disk_cache/simple/simple_index.h index 3829a35436b00a..099d7fdb80c5ce 100644 --- a/net/disk_cache/simple/simple_index.h +++ b/net/disk_cache/simple/simple_index.h @@ -69,7 +69,7 @@ class NET_EXPORT_PRIVATE EntryMetadata { uint32 last_used_time_seconds_since_epoch_; int32 entry_size_; // Storage size in bytes. 
}; -COMPILE_ASSERT(sizeof(EntryMetadata) == 8, metadata_size); +static_assert(sizeof(EntryMetadata) == 8, "incorrect metadata size"); // This class is not Thread-safe. class NET_EXPORT_PRIVATE SimpleIndex diff --git a/net/disk_cache/simple/simple_test_util.h b/net/disk_cache/simple/simple_test_util.h index 82eebbec3149d1..ec9b64434c0db0 100644 --- a/net/disk_cache/simple/simple_test_util.h +++ b/net/disk_cache/simple/simple_test_util.h @@ -30,7 +30,7 @@ class ImmutableArray { template const T& at() const { - COMPILE_ASSERT(Index < size, array_out_of_bounds); + static_assert(Index < size, "array out of bounds"); return data_[Index]; } diff --git a/net/dns/dns_config_service_posix.cc b/net/dns/dns_config_service_posix.cc index 23100aee1069ea..4f6888b49b2442 100644 --- a/net/dns/dns_config_service_posix.cc +++ b/net/dns/dns_config_service_posix.cc @@ -411,9 +411,9 @@ ConfigParsePosixResult ConvertResStateToDnsConfig(const struct __res_state& res, dns_config->nameservers.push_back(ipe); } #elif defined(OS_LINUX) - COMPILE_ASSERT(arraysize(res.nsaddr_list) >= MAXNS && - arraysize(res._u._ext.nsaddrs) >= MAXNS, - incompatible_libresolv_res_state); + static_assert(arraysize(res.nsaddr_list) >= MAXNS && + arraysize(res._u._ext.nsaddrs) >= MAXNS, + "incompatible libresolv res_state"); DCHECK_LE(res.nscount, MAXNS); // Initially, glibc stores IPv6 in |_ext.nsaddrs| and IPv4 in |nsaddr_list|. // In res_send.c:res_nsend, it merges |nsaddr_list| into |nsaddrs|, diff --git a/net/dns/dns_session.cc b/net/dns/dns_session.cc index 0b6292e8e36c67..47dcefa1c579c3 100644 --- a/net/dns/dns_session.cc +++ b/net/dns/dns_session.cc @@ -272,8 +272,8 @@ base::TimeDelta DnsSession::NextTimeoutFromHistogram(unsigned server_index, int attempt) { DCHECK_LT(server_index, server_stats_.size()); - COMPILE_ASSERT(std::numeric_limits::is_signed, - histogram_base_count_assumed_to_be_signed); + static_assert(std::numeric_limits::is_signed, + "histogram base count assumed to be signed"); // Use fixed percentile of observed samples. 
const base::SampleVector& samples = diff --git a/net/http/http_auth.cc b/net/http/http_auth.cc index 4c6d3e8dda309e..35251140f47634 100644 --- a/net/http/http_auth.cc +++ b/net/http/http_auth.cc @@ -137,8 +137,8 @@ const char* HttpAuth::SchemeToString(Scheme scheme) { "spdyproxy", "mock", }; - COMPILE_ASSERT(arraysize(kSchemeNames) == AUTH_SCHEME_MAX, - http_auth_scheme_names_incorrect_size); + static_assert(arraysize(kSchemeNames) == AUTH_SCHEME_MAX, + "http auth scheme names incorrect size"); if (scheme < AUTH_SCHEME_BASIC || scheme >= AUTH_SCHEME_MAX) { NOTREACHED(); return "invalid_scheme"; diff --git a/net/http/http_cache_transaction.cc b/net/http/http_cache_transaction.cc index a5275f78b25d85..0ca5683b10723c 100644 --- a/net/http/http_cache_transaction.cc +++ b/net/http/http_cache_transaction.cc @@ -338,9 +338,9 @@ HttpCache::Transaction::Transaction(RequestPriority priority, HttpCache* cache) total_received_bytes_(0), websocket_handshake_stream_base_create_helper_(NULL), weak_factory_(this) { - COMPILE_ASSERT(HttpCache::Transaction::kNumValidationHeaders == - arraysize(kValidationHeaders), - Invalid_number_of_validation_headers); + static_assert(HttpCache::Transaction::kNumValidationHeaders == + arraysize(kValidationHeaders), + "invalid number of validation headers"); io_callback_ = base::Bind(&Transaction::OnIOComplete, weak_factory_.GetWeakPtr()); diff --git a/net/http/http_network_transaction_unittest.cc b/net/http/http_network_transaction_unittest.cc index 71e775ce10a697..a85e6ea9e9d448 100644 --- a/net/http/http_network_transaction_unittest.cc +++ b/net/http/http_network_transaction_unittest.cc @@ -1854,8 +1854,8 @@ TEST_P(HttpNetworkTransactionTest, KeepAliveAfterUnreadBody) { "HTTP/1.1 301 Moved Permanently", }; - COMPILE_ASSERT(kNumUnreadBodies == arraysize(kStatusLines), - forgot_to_update_kStatusLines); + static_assert(kNumUnreadBodies == arraysize(kStatusLines), + "forgot to update kStatusLines"); for (int i = 0; i < kNumUnreadBodies; ++i) EXPECT_EQ(kStatusLines[i], response_lines[i]); diff --git a/net/http/http_response_body_drainer_unittest.cc b/net/http/http_response_body_drainer_unittest.cc index 371e9a76f69008..8cce583021f591 100644 --- a/net/http/http_response_body_drainer_unittest.cc +++ b/net/http/http_response_body_drainer_unittest.cc @@ -26,9 +26,9 @@ namespace net { namespace { const int kMagicChunkSize = 1024; -COMPILE_ASSERT( - (HttpResponseBodyDrainer::kDrainBodyBufferSize % kMagicChunkSize) == 0, - chunk_size_needs_to_divide_evenly_into_buffer_size); +static_assert((HttpResponseBodyDrainer::kDrainBodyBufferSize % + kMagicChunkSize) == 0, + "chunk size needs to divide evenly into buffer size"); class CloseResultWaiter { public: diff --git a/net/http/http_security_headers.cc b/net/http/http_security_headers.cc index 1a69d5899200fc..aff4a305a2902a 100644 --- a/net/http/http_security_headers.cc +++ b/net/http/http_security_headers.cc @@ -14,7 +14,7 @@ namespace net { namespace { -COMPILE_ASSERT(kMaxHSTSAgeSecs <= kuint32max, kMaxHSTSAgeSecsTooLarge); +static_assert(kMaxHSTSAgeSecs <= kuint32max, "kMaxHSTSAgeSecs too large"); // MaxAgeToInt converts a string representation of a "whole number" of // seconds into a uint32. 
The string may contain an arbitrarily large number, diff --git a/net/http/http_server_properties.cc b/net/http/http_server_properties.cc index 514278cc222425..6b5f226cad0b18 100644 --- a/net/http/http_server_properties.cc +++ b/net/http/http_server_properties.cc @@ -27,9 +27,9 @@ const char* const kAlternateProtocolStrings[] = { "quic" }; -COMPILE_ASSERT( - arraysize(kAlternateProtocolStrings) == NUM_VALID_ALTERNATE_PROTOCOLS, - kAlternateProtocolStringsSize_kNumValidAlternateProtocols_not_equal); +static_assert(arraysize(kAlternateProtocolStrings) == + NUM_VALID_ALTERNATE_PROTOCOLS, + "kAlternateProtocolStrings has incorrect size"); } // namespace diff --git a/net/proxy/proxy_config_source.cc b/net/proxy/proxy_config_source.cc index f47c2f606240cf..80e2735d748d5a 100644 --- a/net/proxy/proxy_config_source.cc +++ b/net/proxy/proxy_config_source.cc @@ -22,8 +22,8 @@ const char* kSourceNames[] = { "CUSTOM", "TEST" }; -COMPILE_ASSERT(arraysize(kSourceNames) == NUM_PROXY_CONFIG_SOURCES, - source_names_incorrect_size); +static_assert(arraysize(kSourceNames) == NUM_PROXY_CONFIG_SOURCES, + "kSourceNames has incorrect size"); } // namespace diff --git a/net/quic/crypto/aes_128_gcm_12_decrypter_nss.cc b/net/quic/crypto/aes_128_gcm_12_decrypter_nss.cc index d2cd7283acceb3..c1dabedb916939 100644 --- a/net/quic/crypto/aes_128_gcm_12_decrypter_nss.cc +++ b/net/quic/crypto/aes_128_gcm_12_decrypter_nss.cc @@ -210,9 +210,9 @@ SECStatus My_Decrypt(PK11SymKey* key, Aes128Gcm12Decrypter::Aes128Gcm12Decrypter() : AeadBaseDecrypter(CKM_AES_GCM, My_Decrypt, kKeySize, kAuthTagSize, kNoncePrefixSize) { - COMPILE_ASSERT(kKeySize <= kMaxKeySize, key_size_too_big); - COMPILE_ASSERT(kNoncePrefixSize <= kMaxNoncePrefixSize, - nonce_prefix_size_too_big); + static_assert(kKeySize <= kMaxKeySize, "key size too big"); + static_assert(kNoncePrefixSize <= kMaxNoncePrefixSize, + "nonce prefix size too big"); ignore_result(g_gcm_support_checker.Get()); } diff --git a/net/quic/crypto/aes_128_gcm_12_decrypter_openssl.cc b/net/quic/crypto/aes_128_gcm_12_decrypter_openssl.cc index 109d2dae85db41..e030bba5957def 100644 --- a/net/quic/crypto/aes_128_gcm_12_decrypter_openssl.cc +++ b/net/quic/crypto/aes_128_gcm_12_decrypter_openssl.cc @@ -18,9 +18,9 @@ const size_t kNoncePrefixSize = 4; Aes128Gcm12Decrypter::Aes128Gcm12Decrypter() : AeadBaseDecrypter(EVP_aead_aes_128_gcm(), kKeySize, kAuthTagSize, kNoncePrefixSize) { - COMPILE_ASSERT(kKeySize <= kMaxKeySize, key_size_too_big); - COMPILE_ASSERT(kNoncePrefixSize <= kMaxNoncePrefixSize, - nonce_prefix_size_too_big); + static_assert(kKeySize <= kMaxKeySize, "key size too big"); + static_assert(kNoncePrefixSize <= kMaxNoncePrefixSize, + "nonce prefix size too big"); } Aes128Gcm12Decrypter::~Aes128Gcm12Decrypter() {} diff --git a/net/quic/crypto/aes_128_gcm_12_encrypter_nss.cc b/net/quic/crypto/aes_128_gcm_12_encrypter_nss.cc index 6214af540f2740..5ca33bf1d97399 100644 --- a/net/quic/crypto/aes_128_gcm_12_encrypter_nss.cc +++ b/net/quic/crypto/aes_128_gcm_12_encrypter_nss.cc @@ -209,9 +209,9 @@ SECStatus My_Encrypt(PK11SymKey* key, Aes128Gcm12Encrypter::Aes128Gcm12Encrypter() : AeadBaseEncrypter(CKM_AES_GCM, My_Encrypt, kKeySize, kAuthTagSize, kNoncePrefixSize) { - COMPILE_ASSERT(kKeySize <= kMaxKeySize, key_size_too_big); - COMPILE_ASSERT(kNoncePrefixSize <= kMaxNoncePrefixSize, - nonce_prefix_size_too_big); + static_assert(kKeySize <= kMaxKeySize, "key size too big"); + static_assert(kNoncePrefixSize <= kMaxNoncePrefixSize, + "nonce prefix size too big"); 
ignore_result(g_gcm_support_checker.Get()); } diff --git a/net/quic/crypto/aes_128_gcm_12_encrypter_openssl.cc b/net/quic/crypto/aes_128_gcm_12_encrypter_openssl.cc index 6489528fd37462..b87d0fc4245c74 100644 --- a/net/quic/crypto/aes_128_gcm_12_encrypter_openssl.cc +++ b/net/quic/crypto/aes_128_gcm_12_encrypter_openssl.cc @@ -18,9 +18,9 @@ const size_t kNoncePrefixSize = 4; Aes128Gcm12Encrypter::Aes128Gcm12Encrypter() : AeadBaseEncrypter(EVP_aead_aes_128_gcm(), kKeySize, kAuthTagSize, kNoncePrefixSize) { - COMPILE_ASSERT(kKeySize <= kMaxKeySize, key_size_too_big); - COMPILE_ASSERT(kNoncePrefixSize <= kMaxNoncePrefixSize, - nonce_prefix_size_too_big); + static_assert(kKeySize <= kMaxKeySize, "key size too big"); + static_assert(kNoncePrefixSize <= kMaxNoncePrefixSize, + "nonce prefix size too big"); } Aes128Gcm12Encrypter::~Aes128Gcm12Encrypter() {} diff --git a/net/quic/crypto/chacha20_poly1305_decrypter_nss.cc b/net/quic/crypto/chacha20_poly1305_decrypter_nss.cc index fbeaaeb9a2973d..0d8eff0e7374d8 100644 --- a/net/quic/crypto/chacha20_poly1305_decrypter_nss.cc +++ b/net/quic/crypto/chacha20_poly1305_decrypter_nss.cc @@ -48,9 +48,9 @@ void ChaCha20Poly1305Decrypter::FillAeadParams(StringPiece nonce, ChaCha20Poly1305Decrypter::ChaCha20Poly1305Decrypter() : AeadBaseDecrypter(CKM_NSS_CHACHA20_POLY1305, PK11_Decrypt, kKeySize, kAuthTagSize, kNoncePrefixSize) { - COMPILE_ASSERT(kKeySize <= kMaxKeySize, key_size_too_big); - COMPILE_ASSERT(kNoncePrefixSize <= kMaxNoncePrefixSize, - nonce_prefix_size_too_big); + static_assert(kKeySize <= kMaxKeySize, "key size too big"); + static_assert(kNoncePrefixSize <= kMaxNoncePrefixSize, + "nonce prefix size too big"); } ChaCha20Poly1305Decrypter::~ChaCha20Poly1305Decrypter() {} diff --git a/net/quic/crypto/chacha20_poly1305_decrypter_openssl.cc b/net/quic/crypto/chacha20_poly1305_decrypter_openssl.cc index 7f0e24da704315..cbf96e91792f90 100644 --- a/net/quic/crypto/chacha20_poly1305_decrypter_openssl.cc +++ b/net/quic/crypto/chacha20_poly1305_decrypter_openssl.cc @@ -18,9 +18,9 @@ const size_t kNoncePrefixSize = 0; ChaCha20Poly1305Decrypter::ChaCha20Poly1305Decrypter() : AeadBaseDecrypter(EVP_aead_chacha20_poly1305(), kKeySize, kAuthTagSize, kNoncePrefixSize) { - COMPILE_ASSERT(kKeySize <= kMaxKeySize, key_size_too_big); - COMPILE_ASSERT(kNoncePrefixSize <= kMaxNoncePrefixSize, - nonce_prefix_size_too_big); + static_assert(kKeySize <= kMaxKeySize, "key size too big"); + static_assert(kNoncePrefixSize <= kMaxNoncePrefixSize, + "nonce prefix size too big"); } ChaCha20Poly1305Decrypter::~ChaCha20Poly1305Decrypter() {} diff --git a/net/quic/crypto/chacha20_poly1305_encrypter_nss.cc b/net/quic/crypto/chacha20_poly1305_encrypter_nss.cc index 40cd896840c0ae..770088cb1deb37 100644 --- a/net/quic/crypto/chacha20_poly1305_encrypter_nss.cc +++ b/net/quic/crypto/chacha20_poly1305_encrypter_nss.cc @@ -48,9 +48,9 @@ void ChaCha20Poly1305Encrypter::FillAeadParams(StringPiece nonce, ChaCha20Poly1305Encrypter::ChaCha20Poly1305Encrypter() : AeadBaseEncrypter(CKM_NSS_CHACHA20_POLY1305, PK11_Encrypt, kKeySize, kAuthTagSize, kNoncePrefixSize) { - COMPILE_ASSERT(kKeySize <= kMaxKeySize, key_size_too_big); - COMPILE_ASSERT(kNoncePrefixSize <= kMaxNoncePrefixSize, - nonce_prefix_size_too_big); + static_assert(kKeySize <= kMaxKeySize, "key size too big"); + static_assert(kNoncePrefixSize <= kMaxNoncePrefixSize, + "nonce prefix size too big"); } ChaCha20Poly1305Encrypter::~ChaCha20Poly1305Encrypter() {} diff --git a/net/quic/crypto/chacha20_poly1305_encrypter_openssl.cc 
b/net/quic/crypto/chacha20_poly1305_encrypter_openssl.cc index e256c2a580764e..91e75d04482323 100644 --- a/net/quic/crypto/chacha20_poly1305_encrypter_openssl.cc +++ b/net/quic/crypto/chacha20_poly1305_encrypter_openssl.cc @@ -18,9 +18,9 @@ const size_t kNoncePrefixSize = 0; ChaCha20Poly1305Encrypter::ChaCha20Poly1305Encrypter() : AeadBaseEncrypter(EVP_aead_chacha20_poly1305(), kKeySize, kAuthTagSize, kNoncePrefixSize) { - COMPILE_ASSERT(kKeySize <= kMaxKeySize, key_size_too_big); - COMPILE_ASSERT(kNoncePrefixSize <= kMaxNoncePrefixSize, - nonce_prefix_size_too_big); + static_assert(kKeySize <= kMaxKeySize, "key size too big"); + static_assert(kNoncePrefixSize <= kMaxNoncePrefixSize, + "nonce prefix size too big"); } ChaCha20Poly1305Encrypter::~ChaCha20Poly1305Encrypter() {} diff --git a/net/quic/crypto/crypto_handshake.h b/net/quic/crypto/crypto_handshake.h index bfd2f347fd854d..8b5e314d519f85 100644 --- a/net/quic/crypto/crypto_handshake.h +++ b/net/quic/crypto/crypto_handshake.h @@ -80,7 +80,7 @@ enum HandshakeFailureReason { // These errors will be packed into an uint32 and we don't want to set the most // significant bit, which may be misinterpreted as the sign bit. -COMPILE_ASSERT(MAX_FAILURE_REASON <= 32, failure_reason_out_of_sync); +static_assert(MAX_FAILURE_REASON <= 32, "failure reason out of sync"); // A CrypterPair contains the encrypter and decrypter for an encryption level. struct NET_EXPORT_PRIVATE CrypterPair { diff --git a/net/quic/crypto/crypto_handshake_message.cc b/net/quic/crypto/crypto_handshake_message.cc index cc39286a6cfdc8..73fc9cf815e678 100644 --- a/net/quic/crypto/crypto_handshake_message.cc +++ b/net/quic/crypto/crypto_handshake_message.cc @@ -65,8 +65,8 @@ void CryptoHandshakeMessage::MarkDirty() { void CryptoHandshakeMessage::SetTaglist(QuicTag tag, ...) { // Warning, if sizeof(QuicTag) > sizeof(int) then this function will break // because the terminating 0 will only be promoted to int. - COMPILE_ASSERT(sizeof(QuicTag) <= sizeof(int), - crypto_tag_may_not_be_larger_than_int_or_varargs_will_break); + static_assert(sizeof(QuicTag) <= sizeof(int), + "crypto tag may not be larger than int or varargs will break"); vector tags; va_list ap; diff --git a/net/quic/crypto/crypto_server_test.cc b/net/quic/crypto/crypto_server_test.cc index 7ff7e9ec7fbd61..0902627f3d9958 100644 --- a/net/quic/crypto/crypto_server_test.cc +++ b/net/quic/crypto/crypto_server_test.cc @@ -273,7 +273,7 @@ class CryptoServerTest : public ::testing::TestWithParam { size_t expected_count) { const uint32* reject_reasons; size_t num_reject_reasons; - COMPILE_ASSERT(sizeof(QuicTag) == sizeof(uint32), header_out_of_sync); + static_assert(sizeof(QuicTag) == sizeof(uint32), "header out of sync"); QuicErrorCode error_code = out_.GetTaglist(kRREJ, &reject_reasons, &num_reject_reasons); if (!FLAGS_send_quic_crypto_reject_reason) { diff --git a/net/quic/crypto/curve25519_key_exchange.cc b/net/quic/crypto/curve25519_key_exchange.cc index 88eb0537763d95..f3d39e76001151 100644 --- a/net/quic/crypto/curve25519_key_exchange.cc +++ b/net/quic/crypto/curve25519_key_exchange.cc @@ -25,11 +25,10 @@ Curve25519KeyExchange* Curve25519KeyExchange::New( // We don't want to #include the NaCl headers in the public header file, so // we use literals for the sizes of private_key_ and public_key_. Here we // assert that those values are equal to the values from the NaCl header. 
- COMPILE_ASSERT( - sizeof(ka->private_key_) == crypto::curve25519::kScalarBytes, - header_out_of_sync); - COMPILE_ASSERT(sizeof(ka->public_key_) == crypto::curve25519::kBytes, - header_out_of_sync); + static_assert(sizeof(ka->private_key_) == crypto::curve25519::kScalarBytes, + "header out of sync"); + static_assert(sizeof(ka->public_key_) == crypto::curve25519::kBytes, + "header out of sync"); if (private_key.size() != crypto::curve25519::kScalarBytes) { return nullptr; diff --git a/net/quic/crypto/quic_crypto_client_config.cc b/net/quic/crypto/quic_crypto_client_config.cc index 268c8ee0f25f0d..9f7f0da7b23aaa 100644 --- a/net/quic/crypto/quic_crypto_client_config.cc +++ b/net/quic/crypto/quic_crypto_client_config.cc @@ -669,7 +669,7 @@ QuicErrorCode QuicCryptoClientConfig::ProcessRejection( const uint32* reject_reasons; size_t num_reject_reasons; - COMPILE_ASSERT(sizeof(QuicTag) == sizeof(uint32), header_out_of_sync); + static_assert(sizeof(QuicTag) == sizeof(uint32), "header out of sync"); if (rej.GetTaglist(kRREJ, &reject_reasons, &num_reject_reasons) == QUIC_NO_ERROR) { uint32 packed_error = 0; diff --git a/net/quic/crypto/quic_crypto_server_config.cc b/net/quic/crypto/quic_crypto_server_config.cc index 4a5dc8a75806da..d4bf9112a4a2c2 100644 --- a/net/quic/crypto/quic_crypto_server_config.cc +++ b/net/quic/crypto/quic_crypto_server_config.cc @@ -1175,8 +1175,8 @@ void QuicCryptoServerConfig::BuildRejection( // token. const size_t max_unverified_size = client_hello.size() * kMultiplier - kREJOverheadBytes; - COMPILE_ASSERT(kClientHelloMinimumSize * kMultiplier >= kREJOverheadBytes, - overhead_calculation_may_underflow); + static_assert(kClientHelloMinimumSize * kMultiplier >= kREJOverheadBytes, + "overhead calculation may underflow"); if (info.valid_source_address_token || signature.size() + compressed.size() < max_unverified_size) { out->SetStringPiece(kCertificateTag, compressed); @@ -1251,7 +1251,8 @@ QuicCryptoServerConfig::ParseConfigProtobuf( " Got " << orbit.size() << " want " << kOrbitSize; return nullptr; } - COMPILE_ASSERT(sizeof(config->orbit) == kOrbitSize, orbit_incorrect_size); + static_assert(sizeof(config->orbit) == kOrbitSize, + "orbit has incorrect size"); memcpy(config->orbit, orbit.data(), sizeof(config->orbit)); { @@ -1615,7 +1616,7 @@ string QuicCryptoServerConfig::NewServerNonce(QuicRandom* rand, const uint32 timestamp = static_cast(now.ToUNIXSeconds()); uint8 server_nonce[kServerNoncePlaintextSize]; - COMPILE_ASSERT(sizeof(server_nonce) > sizeof(timestamp), nonce_too_small); + static_assert(sizeof(server_nonce) > sizeof(timestamp), "nonce too small"); server_nonce[0] = static_cast(timestamp >> 24); server_nonce[1] = static_cast(timestamp >> 16); server_nonce[2] = static_cast(timestamp >> 8); @@ -1652,8 +1653,8 @@ HandshakeFailureReason QuicCryptoServerConfig::ValidateServerNonce( memcpy(server_nonce + 4, server_nonce_orbit_, sizeof(server_nonce_orbit_)); memcpy(server_nonce + 4 + sizeof(server_nonce_orbit_), plaintext.data() + 4, 20); - COMPILE_ASSERT(4 + sizeof(server_nonce_orbit_) + 20 == sizeof(server_nonce), - bad_nonce_buffer_length); + static_assert(4 + sizeof(server_nonce_orbit_) + 20 == sizeof(server_nonce), + "bad nonce buffer length"); InsertStatus nonce_error; { diff --git a/net/quic/crypto/quic_crypto_server_config.h b/net/quic/crypto/quic_crypto_server_config.h index da8713a42e40af..13797d3a55f895 100644 --- a/net/quic/crypto/quic_crypto_server_config.h +++ b/net/quic/crypto/quic_crypto_server_config.h @@ -59,7 +59,7 @@ struct ClientHelloInfo { //
Errors from EvaluateClientHello. std::vector reject_reasons; - COMPILE_ASSERT(sizeof(QuicTag) == sizeof(uint32), header_out_of_sync); + static_assert(sizeof(QuicTag) == sizeof(uint32), "header out of sync"); }; namespace test { diff --git a/net/quic/port_suggester.cc b/net/quic/port_suggester.cc index 6b7940e83efae2..b1dfa484fd5425 100644 --- a/net/quic/port_suggester.cc +++ b/net/quic/port_suggester.cc @@ -16,7 +16,7 @@ PortSuggester::PortSuggester(const HostPortPair& server, uint64 seed) base::SHA1HashBytes( reinterpret_cast(server.host().data()), server.host().length(), hash_bytes); - COMPILE_ASSERT(sizeof(seed_) < sizeof(hash_bytes), seed_larger_than_hash); + static_assert(sizeof(seed_) < sizeof(hash_bytes), "seed larger than hash"); memcpy(&seed_, hash_bytes, sizeof(seed_)); seed_ ^= seed ^ server.port(); } diff --git a/net/socket/client_socket_pool_manager.cc b/net/socket/client_socket_pool_manager.cc index 8b6c17cb5f652a..cd3633661ff92b 100644 --- a/net/socket/client_socket_pool_manager.cc +++ b/net/socket/client_socket_pool_manager.cc @@ -29,9 +29,9 @@ int g_max_sockets_per_pool[] = { 256 // WEBSOCKET_SOCKET_POOL }; -COMPILE_ASSERT(arraysize(g_max_sockets_per_pool) == - HttpNetworkSession::NUM_SOCKET_POOL_TYPES, - max_sockets_per_pool_length_mismatch); +static_assert(arraysize(g_max_sockets_per_pool) == + HttpNetworkSession::NUM_SOCKET_POOL_TYPES, + "max sockets per pool length mismatch"); // Default to allow up to 6 connections per host. Experiment and tuning may // try other values (greater than 0). Too large may cause many problems, such @@ -48,9 +48,9 @@ int g_max_sockets_per_group[] = { 30 // WEBSOCKET_SOCKET_POOL }; -COMPILE_ASSERT(arraysize(g_max_sockets_per_group) == - HttpNetworkSession::NUM_SOCKET_POOL_TYPES, - max_sockets_per_group_length_mismatch); +static_assert(arraysize(g_max_sockets_per_group) == + HttpNetworkSession::NUM_SOCKET_POOL_TYPES, + "max sockets per group length mismatch"); // The max number of sockets to allow per proxy server. This applies both to // http and SOCKS proxies. See http://crbug.com/12066 and @@ -60,9 +60,9 @@ int g_max_sockets_per_proxy_server[] = { kDefaultMaxSocketsPerProxyServer // WEBSOCKET_SOCKET_POOL }; -COMPILE_ASSERT(arraysize(g_max_sockets_per_proxy_server) == - HttpNetworkSession::NUM_SOCKET_POOL_TYPES, - max_sockets_per_proxy_server_length_mismatch); +static_assert(arraysize(g_max_sockets_per_proxy_server) == + HttpNetworkSession::NUM_SOCKET_POOL_TYPES, + "max sockets per proxy server length mismatch"); // The meat of the implementation for the InitSocketHandleForHttpRequest, // InitSocketHandleForRawConnect and PreconnectSocketsForHttpRequest methods. 
diff --git a/net/socket/client_socket_pool_manager_impl.h b/net/socket/client_socket_pool_manager_impl.h index 515b1e118d3f9b..d6693333463f4f 100644 --- a/net/socket/client_socket_pool_manager_impl.h +++ b/net/socket/client_socket_pool_manager_impl.h @@ -41,8 +41,7 @@ template class OwnedPoolMap : public std::map { public: OwnedPoolMap() { - COMPILE_ASSERT(base::is_pointer::value, - value_must_be_a_pointer); + static_assert(base::is_pointer::value, "value must be a pointer"); } ~OwnedPoolMap() { diff --git a/net/socket/socks5_client_socket.cc b/net/socket/socks5_client_socket.cc index 681f73f26e983f..b435e135426053 100644 --- a/net/socket/socks5_client_socket.cc +++ b/net/socket/socks5_client_socket.cc @@ -25,8 +25,8 @@ const uint8 SOCKS5ClientSocket::kSOCKS5Version = 0x05; const uint8 SOCKS5ClientSocket::kTunnelCommand = 0x01; const uint8 SOCKS5ClientSocket::kNullByte = 0x00; -COMPILE_ASSERT(sizeof(struct in_addr) == 4, incorrect_system_size_of_IPv4); -COMPILE_ASSERT(sizeof(struct in6_addr) == 16, incorrect_system_size_of_IPv6); +static_assert(sizeof(struct in_addr) == 4, "incorrect system size of IPv4"); +static_assert(sizeof(struct in6_addr) == 16, "incorrect system size of IPv6"); SOCKS5ClientSocket::SOCKS5ClientSocket( scoped_ptr transport_socket, diff --git a/net/socket/socks_client_socket.cc b/net/socket/socks_client_socket.cc index ecf717f2a88f51..75eb36a76d73d8 100644 --- a/net/socket/socks_client_socket.cc +++ b/net/socket/socks_client_socket.cc @@ -44,8 +44,8 @@ struct SOCKS4ServerRequest { uint16 nw_port; uint8 ip[4]; }; -COMPILE_ASSERT(sizeof(SOCKS4ServerRequest) == kWriteHeaderSize, - socks4_server_request_struct_wrong_size); +static_assert(sizeof(SOCKS4ServerRequest) == kWriteHeaderSize, + "socks4 server request struct has incorrect size"); // A struct holding details of the SOCKS4 Server Response. struct SOCKS4ServerResponse { @@ -54,8 +54,8 @@ struct SOCKS4ServerResponse { uint16 port; uint8 ip[4]; }; -COMPILE_ASSERT(sizeof(SOCKS4ServerResponse) == kReadHeaderSize, - socks4_server_response_struct_wrong_size); +static_assert(sizeof(SOCKS4ServerResponse) == kReadHeaderSize, + "socks4 server response struct has incorrect size"); SOCKSClientSocket::SOCKSClientSocket( scoped_ptr transport_socket, diff --git a/net/spdy/spdy_http_utils.cc b/net/spdy/spdy_http_utils.cc index fc77f5d14fb003..c604880473c7c4 100644 --- a/net/spdy/spdy_http_utils.cc +++ b/net/spdy/spdy_http_utils.cc @@ -160,10 +160,8 @@ void CreateSpdyHeadersFromHttpResponse( } } - -COMPILE_ASSERT(HIGHEST - LOWEST < 4 && - HIGHEST - MINIMUM_PRIORITY < 5, - request_priority_incompatible_with_spdy); +static_assert(HIGHEST - LOWEST < 4 && HIGHEST - MINIMUM_PRIORITY < 5, + "request priority incompatible with spdy"); SpdyPriority ConvertRequestPriorityToSpdyPriority( const RequestPriority priority, diff --git a/net/spdy/spdy_session.h b/net/spdy/spdy_session.h index a468dc412da09a..234e318e51db52 100644 --- a/net/spdy/spdy_session.h +++ b/net/spdy/spdy_session.h @@ -129,10 +129,10 @@ SpdyGoAwayStatus NET_EXPORT_PRIVATE MapNetErrorToGoAwayStatus(Error err); // If these compile asserts fail then SpdyProtocolErrorDetails needs // to be updated with new values, as do the mapping functions above. 
-COMPILE_ASSERT(12 == SpdyFramer::LAST_ERROR, - SpdyProtocolErrorDetails_SpdyErrors_mismatch); -COMPILE_ASSERT(17 == RST_STREAM_NUM_STATUS_CODES, - SpdyProtocolErrorDetails_RstStreamStatus_mismatch); +static_assert(12 == SpdyFramer::LAST_ERROR, + "SpdyProtocolErrorDetails / Spdy Errors mismatch"); +static_assert(17 == RST_STREAM_NUM_STATUS_CODES, + "SpdyProtocolErrorDetails / RstStreamStatus mismatch"); // Splits pushed |headers| into request and response parts. Request headers are // the headers specifying resource URL. diff --git a/net/ssl/ssl_connection_status_flags.h b/net/ssl/ssl_connection_status_flags.h index e2a8193ba1929b..d3bfed26f941c0 100644 --- a/net/ssl/ssl_connection_status_flags.h +++ b/net/ssl/ssl_connection_status_flags.h @@ -49,8 +49,8 @@ enum { SSL_CONNECTION_VERSION_QUIC = 7, SSL_CONNECTION_VERSION_MAX, }; -COMPILE_ASSERT(SSL_CONNECTION_VERSION_MAX - 1 <= SSL_CONNECTION_VERSION_MASK, - SSL_CONNECTION_VERSION_MASK_too_small); +static_assert(SSL_CONNECTION_VERSION_MAX - 1 <= SSL_CONNECTION_VERSION_MASK, + "SSL_CONNECTION_VERSION_MASK too small"); inline uint16 SSLConnectionStatusToCipherSuite(int connection_status) { return static_cast(connection_status); diff --git a/net/test/url_request/url_request_mock_http_job.cc b/net/test/url_request/url_request_mock_http_job.cc index ede87cfeabdb86..1c6e35cba1d9a7 100644 --- a/net/test/url_request/url_request_mock_http_job.cc +++ b/net/test/url_request/url_request_mock_http_job.cc @@ -133,8 +133,8 @@ GURL URLRequestMockHTTPJob::GetMockUrl(const base::FilePath& path) { GURL URLRequestMockHTTPJob::GetMockUrlWithFailure(const base::FilePath& path, FailurePhase phase, int net_error) { - COMPILE_ASSERT(arraysize(kFailurePhase) == MAX_FAILURE_PHASE, - kFailurePhase_must_match_FailurePhase_enum); + static_assert(arraysize(kFailurePhase) == MAX_FAILURE_PHASE, + "kFailurePhase must match FailurePhase enum"); DCHECK_GE(phase, START); DCHECK_LE(phase, READ_SYNC); std::string url(GetMockUrl(path).spec()); diff --git a/net/tools/crash_cache/crash_cache.cc b/net/tools/crash_cache/crash_cache.cc index 9ec15bb56e1919..60d71583018f36 100644 --- a/net/tools/crash_cache/crash_cache.cc +++ b/net/tools/crash_cache/crash_cache.cc @@ -114,7 +114,7 @@ bool CreateTargetFolder(const base::FilePath& path, RankCrashes action, "remove_load2", "remove_load3" }; - COMPILE_ASSERT(arraysize(folders) == disk_cache::MAX_CRASH, sync_folders); + static_assert(arraysize(folders) == disk_cache::MAX_CRASH, "sync folders"); DCHECK(action > disk_cache::NO_CRASH && action < disk_cache::MAX_CRASH); *full_path = path.AppendASCII(folders[action]); diff --git a/net/url_request/url_request_simple_job_unittest.cc b/net/url_request/url_request_simple_job_unittest.cc index 5ee71b5766fbb6..7fbdefb4e887aa 100644 --- a/net/url_request/url_request_simple_job_unittest.cc +++ b/net/url_request/url_request_simple_job_unittest.cc @@ -20,10 +20,11 @@ namespace { const char kTestData[] = "Huge data array"; const int kRangeFirstPosition = 5; const int kRangeLastPosition = 8; -COMPILE_ASSERT(kRangeFirstPosition > 0 && - kRangeFirstPosition < kRangeLastPosition && - kRangeLastPosition < static_cast(arraysize(kTestData) - 1), - invalid_range); +static_assert(kRangeFirstPosition > 0 && + kRangeFirstPosition < kRangeLastPosition && + kRangeLastPosition < + static_cast(arraysize(kTestData) - 1), + "invalid range"); class MockSimpleJob : public URLRequestSimpleJob { public: diff --git a/net/websockets/websocket_basic_handshake_stream.cc b/net/websockets/websocket_basic_handshake_stream.cc index 
bac2438e2a4d45..c0f20c7266e2e0 100644 --- a/net/websockets/websocket_basic_handshake_stream.cc +++ b/net/websockets/websocket_basic_handshake_stream.cc @@ -272,8 +272,8 @@ bool ValidatePerMessageDeflateExtension(const WebSocketExtension& extension, static const char kNoContextTakeover[] = "no_context_takeover"; static const char kMaxWindowBits[] = "max_window_bits"; const size_t kPrefixLen = arraysize(kClientPrefix) - 1; - COMPILE_ASSERT(kPrefixLen == arraysize(kServerPrefix) - 1, - the_strings_server_and_client_must_be_the_same_length); + static_assert(kPrefixLen == arraysize(kServerPrefix) - 1, + "the strings server and client must be the same length"); typedef std::vector ParameterVector; DCHECK_EQ("permessage-deflate", extension.name()); diff --git a/net/websockets/websocket_channel.cc b/net/websockets/websocket_channel.cc index 30abb2db90fe8e..8e48c1e8ab623f 100644 --- a/net/websockets/websocket_channel.cc +++ b/net/websockets/websocket_channel.cc @@ -1066,8 +1066,8 @@ ChannelState WebSocketChannel::SendClose(uint16 code, body = new IOBuffer(payload_length); size = payload_length; base::WriteBigEndian(body->data(), code); - COMPILE_ASSERT(sizeof(code) == kWebSocketCloseCodeLength, - they_should_both_be_two); + static_assert(sizeof(code) == kWebSocketCloseCodeLength, + "they should both be two"); std::copy( reason.begin(), reason.end(), body->data() + kWebSocketCloseCodeLength); } @@ -1102,8 +1102,8 @@ bool WebSocketChannel::ParseClose(const scoped_refptr& buffer, const char* data = buffer->data(); uint16 unchecked_code = 0; base::ReadBigEndian(data, &unchecked_code); - COMPILE_ASSERT(sizeof(unchecked_code) == kWebSocketCloseCodeLength, - they_should_both_be_two_bytes); + static_assert(sizeof(unchecked_code) == kWebSocketCloseCodeLength, + "they should both be two bytes"); switch (unchecked_code) { case kWebSocketErrorNoStatusReceived: diff --git a/net/websockets/websocket_frame.cc b/net/websockets/websocket_frame.cc index 6fe972ba4a88e3..a90cc0a1c304b7 100644 --- a/net/websockets/websocket_frame.cc +++ b/net/websockets/websocket_frame.cc @@ -178,9 +178,9 @@ void MaskWebSocketFramePayload(const WebSocketMaskingKey& masking_key, typedef size_t PackedMaskType; PackedMaskType packed_mask_key = 0; static const size_t kPackedMaskKeySize = sizeof(packed_mask_key); - COMPILE_ASSERT((kPackedMaskKeySize >= kMaskingKeyLength && - kPackedMaskKeySize % kMaskingKeyLength == 0), - word_size_is_not_multiple_of_mask_length); + static_assert((kPackedMaskKeySize >= kMaskingKeyLength && + kPackedMaskKeySize % kMaskingKeyLength == 0), + "word size is not a multiple of mask length"); char* const end = data + data_size; // If the buffer is too small for the vectorised version to be useful, revert // to the byte-at-a-time implementation early. 
diff --git a/net/websockets/websocket_frame_perftest.cc b/net/websockets/websocket_frame_perftest.cc index 11fe5afab2a3ca..98ea624ac14503 100644 --- a/net/websockets/websocket_frame_perftest.cc +++ b/net/websockets/websocket_frame_perftest.cc @@ -19,9 +19,9 @@ const int kIterations = 100000; const int kLongPayloadSize = 1 << 16; const char kMaskingKey[] = "\xFE\xED\xBE\xEF"; -COMPILE_ASSERT(arraysize(kMaskingKey) == - WebSocketFrameHeader::kMaskingKeyLength + 1, - incorrect_masking_key_size); +static_assert(arraysize(kMaskingKey) == + WebSocketFrameHeader::kMaskingKeyLength + 1, + "incorrect masking key size"); class WebSocketFrameTestMaskBenchmark : public ::testing::Test { protected: diff --git a/net/websockets/websocket_frame_test.cc b/net/websockets/websocket_frame_test.cc index 104bab51e2ac6e..eca94f02647ee0 100644 --- a/net/websockets/websocket_frame_test.cc +++ b/net/websockets/websocket_frame_test.cc @@ -51,9 +51,9 @@ TEST(WebSocketFrameHeaderTest, FrameLengths) { TEST(WebSocketFrameHeaderTest, FrameLengthsWithMasking) { static const char kMaskingKey[] = "\xDE\xAD\xBE\xEF"; - COMPILE_ASSERT(arraysize(kMaskingKey) - 1 == - WebSocketFrameHeader::kMaskingKeyLength, - incorrect_masking_key_size); + static_assert( + arraysize(kMaskingKey) - 1 == WebSocketFrameHeader::kMaskingKeyLength, + "incorrect masking key size"); struct TestCase { const char* frame_header; @@ -294,8 +294,8 @@ TEST(WebSocketFrameTest, MaskPayloadAlignment) { "\x08\x12\x11\xcb\x73\x71\xf3\xc9\xcb\xf7\x34\x61\x1a\xb2\x46\x08" "\xbf\x41\x62\xba\x96\x6f\xe0\xe9\x4d\xcc\xea\x90\xd5\x2b\xbc\x16" }; - COMPILE_ASSERT(arraysize(kTestInput) == arraysize(kTestOutput), - output_and_input_arrays_have_the_same_length); + static_assert(arraysize(kTestInput) == arraysize(kTestOutput), + "output and input arrays should have the same length"); scoped_ptr scratch( static_cast( base::AlignedAlloc(kScratchBufferSize, kMaxVectorAlignment)));
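The conversion throughout this patch is mechanical: each COMPILE_ASSERT(expr, identifier_message) becomes a C++11 static_assert(expr, "string message"), so the failure text no longer has to be spelled as a legal identifier. A minimal, self-contained sketch of the pattern follows; the struct and sizes are hypothetical and not taken from any of the files touched above.

#include <cstddef>

// Hypothetical on-disk record, used only to illustrate the assertion style.
struct ExampleRecord {
  char key[24];
  char payload[232];
};

// Old style: the message had to be a valid identifier, enforced by a macro
// that typically expanded to a dummy typedef or negative-size array:
//   COMPILE_ASSERT(sizeof(ExampleRecord) == 256, bad_ExampleRecord);
// New style: static_assert is part of the language and takes a string
// literal, which is why identifiers like bad_EntyStore become plain prose
// in the hunks above.
static_assert(sizeof(ExampleRecord) == 256, "bad ExampleRecord");

int main() { return 0; }

Unlike the macro, a failed static_assert reports the quoted message directly in the compiler diagnostics, with no dummy typedef or array involved.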