Skip to content

Commit b08e8cc

Browse files
author
David Roberts
authored
[ML] Adjacency weighting fixes in categorization (#2277)
In #1903 we changed dictionary weighting in categorization to give higher weighting when there were 3 or more adjacent dictionary words. This was the first time that we'd ever had the situation where the same token could have a different weight in different messages. Unfortunately, the way this interacted with our requiring equal weights when checking for common tokens meant tokens could be bizarrely removed from categories. For example, with the following two messages we'd put them in the same category but say that "started" was not a common token:

- Service abcd was started
- Service reaper was started

This happens because "abcd" is not a dictionary word but "reaper" is, so "started" has weight 6 in the first message but weight 31 in the second. Considering "started" to NOT be a common token in this case is extremely bad, both intuitively and for the accuracy of drilldown searches. Therefore this PR changes the categorization code to consider tokens equal if their token IDs are equal, even when their weights differ. Weights are now only used to compute the distance between different tokens. This causes the need for another change: it is no longer as simple as it used to be to calculate the highest and lowest possible total weight of a message that might possibly be considered similar to the current message. This calculation now needs to take account of possible adjacency weighting, either in the current message or in the messages being considered as matches. (This also has the side effect that we'll do a higher number of expensive Levenshtein distance calculations, as fewer potential matches will be discarded early by the simple weight check.)
1 parent d51b461 commit b08e8cc

File tree

11 files changed

+217
-104
lines changed

11 files changed

+217
-104
lines changed

docs/CHANGELOG.asciidoc

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,10 @@
5555
* Make ML native processes work with glibc 2.35 (required for Ubuntu 22.04). (See
5656
{ml-pull}2272[#2272].)
5757

58+
=== Bug Fixes
59+
60+
* Adjacency weighting fixes in categorization. (See {ml-pull}2277[#2277].)
61+
5862
== {es} version 8.2.1
5963

6064
=== Bug Fixes

include/core/CWordDictionary.h

Lines changed: 33 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,11 +11,11 @@
1111
#ifndef INCLUDED_ml_core_CWordDictionary_h
1212
#define INCLUDED_ml_core_CWordDictionary_h
1313

14-
#include <core/CNonCopyable.h>
1514
#include <core/ImportExport.h>
1615

1716
#include <boost/unordered_map.hpp>
1817

18+
#include <algorithm>
1919
#include <string>
2020

2121
namespace ml {
@@ -50,7 +50,7 @@ namespace core {
5050
//! too to avoid repeated locking in the instance() method (see
5151
//! Modern C++ Design by Andrei Alexandrescu for details).
5252
//!
53-
class CORE_EXPORT CWordDictionary : private CNonCopyable {
53+
class CORE_EXPORT CWordDictionary {
5454
public:
5555
//! Types of words.
5656
//! The values used are deliberately powers of two so that in the
@@ -84,6 +84,10 @@ class CORE_EXPORT CWordDictionary : private CNonCopyable {
8484
void reset() {
8585
// NO-OP
8686
}
87+
88+
std::size_t minMatchingWeight(std::size_t weight) { return weight; }
89+
90+
std::size_t maxMatchingWeight(std::size_t weight) { return weight; }
8791
};
8892

8993
using TWeightAll2 = CWeightAll<2>;
@@ -103,6 +107,10 @@ class CORE_EXPORT CWordDictionary : private CNonCopyable {
103107
void reset() {
104108
// NO-OP
105109
}
110+
111+
std::size_t minMatchingWeight(std::size_t weight) { return weight; }
112+
113+
std::size_t maxMatchingWeight(std::size_t weight) { return weight; }
106114
};
107115

108116
using TWeightVerbs5Other2 = CWeightOnePart<E_Verb, 5, 2>;
@@ -120,17 +128,28 @@ class CORE_EXPORT CWordDictionary : private CNonCopyable {
120128
}
121129

122130
std::size_t weight = (partOfSpeech == SPECIAL_PART1) ? EXTRA_WEIGHT1 : DEFAULT_EXTRA_WEIGHT;
123-
std::size_t boost =
124-
(m_NumOfAdjacentDictionaryWords > 1 ? ADJACENT_PARTS_BOOST : 1);
131+
std::size_t boost = (++m_NumOfAdjacentDictionaryWords > 2) ? ADJACENT_PARTS_BOOST
132+
: 1;
125133
weight *= boost;
126134

127-
++m_NumOfAdjacentDictionaryWords;
128-
129135
return weight;
130136
}
131137

132138
void reset() { m_NumOfAdjacentDictionaryWords = 0; }
133139

140+
std::size_t minMatchingWeight(std::size_t weight) {
141+
return (weight <= ADJACENT_PARTS_BOOST)
142+
? weight
143+
: (1 + (weight - 1) / ADJACENT_PARTS_BOOST);
144+
}
145+
146+
std::size_t maxMatchingWeight(std::size_t weight) {
147+
return (weight <= std::min(EXTRA_WEIGHT1, DEFAULT_EXTRA_WEIGHT) ||
148+
weight > std::max(EXTRA_WEIGHT1 + 1, DEFAULT_EXTRA_WEIGHT + 1))
149+
? weight
150+
: (1 + (weight - 1) * ADJACENT_PARTS_BOOST);
151+
}
152+
134153
private:
135154
std::size_t m_NumOfAdjacentDictionaryWords = 0;
136155
};
@@ -155,6 +174,10 @@ class CORE_EXPORT CWordDictionary : private CNonCopyable {
155174
void reset() {
156175
// NO-OP
157176
}
177+
178+
std::size_t minMatchingWeight(std::size_t weight) { return weight; }
179+
180+
std::size_t maxMatchingWeight(std::size_t weight) { return weight; }
158181
};
159182

160183
// Similar templates with more arguments can be added as required...
@@ -176,6 +199,10 @@ class CORE_EXPORT CWordDictionary : private CNonCopyable {
176199
//! aren't in the dictionary.
177200
EPartOfSpeech partOfSpeech(const std::string& str) const;
178201

202+
//! No copying
203+
CWordDictionary(const CWordDictionary&) = delete;
204+
CWordDictionary& operator=(const CWordDictionary&) = delete;
205+
179206
private:
180207
//! Constructor for a singleton is private
181208
CWordDictionary();

include/core/WindowsSafe.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,12 +19,6 @@
1919

2020
#include <Windows.h>
2121

22-
#ifdef min
23-
#undef min
24-
#endif
25-
#ifdef max
26-
#undef max
27-
#endif
2822
#ifdef TEXT
2923
#undef TEXT
3024
#endif

include/model/CTokenListCategory.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -119,8 +119,7 @@ class MODEL_EXPORT CTokenListCategory {
119119
return testItem.first >= commonItem.first;
120120
});
121121
if (testIter == uniqueTokenIds.end() ||
122-
testIter->first != commonItem.first ||
123-
testIter->second != commonItem.second) {
122+
testIter->first != commonItem.first) {
124123
return false;
125124
}
126125
++testIter;

include/model/CTokenListDataCategorizer.h

Lines changed: 20 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,9 @@ class CTokenListDataCategorizer : public CTokenListDataCategorizerBase {
9696
const std::string& str,
9797
TSizeSizePrVec& tokenIds,
9898
TSizeSizeMap& tokenUniqueIds,
99-
std::size_t& totalWeight) override {
99+
std::size_t& totalWeight,
100+
std::size_t& minReweightedTotalWeight,
101+
std::size_t& maxReweightedTotalWeight) override {
100102
tokenIds.clear();
101103
tokenUniqueIds.clear();
102104
totalWeight = 0;
@@ -128,8 +130,9 @@ class CTokenListDataCategorizer : public CTokenListDataCategorizerBase {
128130
}
129131
} else {
130132
if (!temp.empty()) {
131-
this->considerToken(fields, nonHexPos, temp, tokenIds,
132-
tokenUniqueIds, totalWeight);
133+
this->considerToken(fields, nonHexPos, temp, tokenIds, tokenUniqueIds,
134+
totalWeight, minReweightedTotalWeight,
135+
maxReweightedTotalWeight);
133136
temp.clear();
134137
}
135138

@@ -140,7 +143,8 @@ class CTokenListDataCategorizer : public CTokenListDataCategorizerBase {
140143
}
141144

142145
if (!temp.empty()) {
143-
this->considerToken(fields, nonHexPos, temp, tokenIds, tokenUniqueIds, totalWeight);
146+
this->considerToken(fields, nonHexPos, temp, tokenIds, tokenUniqueIds, totalWeight,
147+
minReweightedTotalWeight, maxReweightedTotalWeight);
144148
}
145149

146150
LOG_TRACE(<< str << " tokenised to " << tokenIds.size() << " tokens with total weight "
@@ -154,7 +158,9 @@ class CTokenListDataCategorizer : public CTokenListDataCategorizerBase {
154158
void tokenToIdAndWeight(const std::string& token,
155159
TSizeSizePrVec& tokenIds,
156160
TSizeSizeMap& tokenUniqueIds,
157-
std::size_t& totalWeight) override {
161+
std::size_t& totalWeight,
162+
std::size_t& minReweightedTotalWeight,
163+
std::size_t& maxReweightedTotalWeight) override {
158164
TSizeSizePr idWithWeight(this->idForToken(token), 1);
159165

160166
if (token.length() >= MIN_DICTIONARY_LENGTH) {
@@ -165,6 +171,10 @@ class CTokenListDataCategorizer : public CTokenListDataCategorizerBase {
165171
tokenIds.push_back(idWithWeight);
166172
tokenUniqueIds[idWithWeight.first] += idWithWeight.second;
167173
totalWeight += idWithWeight.second;
174+
minReweightedTotalWeight +=
175+
m_DictionaryWeightFunc.minMatchingWeight(idWithWeight.second);
176+
maxReweightedTotalWeight +=
177+
m_DictionaryWeightFunc.maxMatchingWeight(idWithWeight.second);
168178
}
169179

170180
void reset() override { m_DictionaryWeightFunc.reset(); }
@@ -225,7 +235,9 @@ class CTokenListDataCategorizer : public CTokenListDataCategorizerBase {
225235
std::string& token,
226236
TSizeSizePrVec& tokenIds,
227237
TSizeSizeMap& tokenUniqueIds,
228-
std::size_t& totalWeight) {
238+
std::size_t& totalWeight,
239+
std::size_t& minReweightedTotalWeight,
240+
std::size_t& maxReweightedTotalWeight) {
229241
if (IGNORE_LEADING_DIGIT && std::isdigit(static_cast<unsigned char>(token[0]))) {
230242
return;
231243
}
@@ -262,7 +274,8 @@ class CTokenListDataCategorizer : public CTokenListDataCategorizerBase {
262274
return;
263275
}
264276

265-
this->tokenToIdAndWeight(token, tokenIds, tokenUniqueIds, totalWeight);
277+
this->tokenToIdAndWeight(token, tokenIds, tokenUniqueIds, totalWeight,
278+
minReweightedTotalWeight, maxReweightedTotalWeight);
266279
}
267280

268281
private:

include/model/CTokenListDataCategorizerBase.h

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -219,14 +219,18 @@ class MODEL_EXPORT CTokenListDataCategorizerBase : public CDataCategorizer {
219219
const std::string& str,
220220
TSizeSizePrVec& tokenIds,
221221
TSizeSizeMap& tokenUniqueIds,
222-
std::size_t& totalWeight) = 0;
222+
std::size_t& totalWeight,
223+
std::size_t& minReweightedTotalWeight,
224+
std::size_t& maxReweightedTotalWeight) = 0;
223225

224226
//! Take a string token, convert it to a numeric ID and a weighting and
225227
//! add these to the provided data structures.
226228
virtual void tokenToIdAndWeight(const std::string& token,
227229
TSizeSizePrVec& tokenIds,
228230
TSizeSizeMap& tokenUniqueIds,
229-
std::size_t& totalWeight) = 0;
231+
std::size_t& totalWeight,
232+
std::size_t& minReweightedTotalWeight,
233+
std::size_t& maxReweightedTotalWeight) = 0;
230234

231235
virtual void reset() = 0;
232236

@@ -339,7 +343,9 @@ class MODEL_EXPORT CTokenListDataCategorizerBase : public CDataCategorizer {
339343
bool addPretokenisedTokens(const std::string& tokensCsv,
340344
TSizeSizePrVec& tokenIds,
341345
TSizeSizeMap& tokenUniqueIds,
342-
std::size_t& totalWeight);
346+
std::size_t& totalWeight,
347+
std::size_t& minReweightedTotalWeight,
348+
std::size_t& maxReweightedTotalWeight);
343349

344350
//! Get the categories that will never be detected again because the
345351
//! specified category will always be returned instead. This overload

lib/core/unittest/CWordDictionaryTest.cc

Lines changed: 79 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -58,43 +58,93 @@ BOOST_AUTO_TEST_CASE(testPartOfSpeech) {
5858
dict.partOfSpeech("a"));
5959
}
6060

61-
BOOST_AUTO_TEST_CASE(testWeightingFunctors) {
61+
BOOST_AUTO_TEST_CASE(testSimpleWeightingFunctors) {
6262
{
6363
ml::core::CWordDictionary::TWeightAll2 weighter;
6464

65-
BOOST_REQUIRE_EQUAL(size_t(0), weighter(ml::core::CWordDictionary::E_NotInDictionary));
66-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_UnknownPart));
67-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Noun));
68-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Plural));
69-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Verb));
70-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Adjective));
71-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Adverb));
72-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Conjunction));
73-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Preposition));
74-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Interjection));
75-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Pronoun));
76-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_DefiniteArticle));
77-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_IndefiniteArticle));
65+
BOOST_REQUIRE_EQUAL(0, weighter(ml::core::CWordDictionary::E_NotInDictionary));
66+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_UnknownPart));
67+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Noun));
68+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Plural));
69+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Verb));
70+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Adjective));
71+
weighter.reset(); // should make no difference
72+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Adverb));
73+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Conjunction));
74+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Preposition));
75+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Interjection));
76+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Pronoun));
77+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_DefiniteArticle));
78+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_IndefiniteArticle));
79+
// Any given token always gives the same weight, so min/max matching
80+
// should always be the same as the original
81+
for (std::size_t weight = 1; weight < 10; ++weight) {
82+
BOOST_REQUIRE_EQUAL(weight, weighter.minMatchingWeight(weight));
83+
BOOST_REQUIRE_EQUAL(weight, weighter.maxMatchingWeight(weight));
84+
}
7885
}
7986
{
8087
ml::core::CWordDictionary::TWeightVerbs5Other2 weighter;
8188

82-
BOOST_REQUIRE_EQUAL(size_t(0), weighter(ml::core::CWordDictionary::E_NotInDictionary));
83-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_UnknownPart));
84-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Noun));
85-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Plural));
86-
BOOST_REQUIRE_EQUAL(size_t(5), weighter(ml::core::CWordDictionary::E_Verb));
87-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Adjective));
88-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Adverb));
89-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Conjunction));
90-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Preposition));
91-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Interjection));
92-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_Pronoun));
93-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_DefiniteArticle));
94-
BOOST_REQUIRE_EQUAL(size_t(2), weighter(ml::core::CWordDictionary::E_IndefiniteArticle));
89+
BOOST_REQUIRE_EQUAL(0, weighter(ml::core::CWordDictionary::E_NotInDictionary));
90+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_UnknownPart));
91+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Noun));
92+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Plural));
93+
weighter.reset(); // should make no difference
94+
BOOST_REQUIRE_EQUAL(5, weighter(ml::core::CWordDictionary::E_Verb));
95+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Adjective));
96+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Adverb));
97+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Conjunction));
98+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Preposition));
99+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Interjection));
100+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Pronoun));
101+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_DefiniteArticle));
102+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_IndefiniteArticle));
103+
// Any given token always gives the same weight, so min/max matching
104+
// should always be the same as the original
105+
for (std::size_t weight = 1; weight < 10; ++weight) {
106+
BOOST_REQUIRE_EQUAL(weight, weighter.minMatchingWeight(weight));
107+
BOOST_REQUIRE_EQUAL(weight, weighter.maxMatchingWeight(weight));
108+
}
95109
}
96110
}
97111

112+
BOOST_AUTO_TEST_CASE(testAdjacencyDependentWeightingFunctor) {
113+
ml::core::CWordDictionary::TWeightVerbs5Other2AdjacentBoost6 weighter;
114+
115+
BOOST_REQUIRE_EQUAL(0, weighter(ml::core::CWordDictionary::E_NotInDictionary));
116+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_UnknownPart));
117+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Noun));
118+
BOOST_REQUIRE_EQUAL(12, weighter(ml::core::CWordDictionary::E_Plural));
119+
BOOST_REQUIRE_EQUAL(30, weighter(ml::core::CWordDictionary::E_Verb));
120+
weighter.reset();
121+
// Explicit reset stops adjacency multiplier
122+
BOOST_REQUIRE_EQUAL(5, weighter(ml::core::CWordDictionary::E_Verb));
123+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Adjective));
124+
BOOST_REQUIRE_EQUAL(12, weighter(ml::core::CWordDictionary::E_Adverb));
125+
BOOST_REQUIRE_EQUAL(12, weighter(ml::core::CWordDictionary::E_Conjunction));
126+
BOOST_REQUIRE_EQUAL(0, weighter(ml::core::CWordDictionary::E_NotInDictionary));
127+
// Non-dictionary word stops adjacency multiplier
128+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Noun));
129+
BOOST_REQUIRE_EQUAL(5, weighter(ml::core::CWordDictionary::E_Verb));
130+
weighter.reset();
131+
// Explicit reset stops adjacency multiplier
132+
BOOST_REQUIRE_EQUAL(2, weighter(ml::core::CWordDictionary::E_Adjective));
133+
134+
// Of the possible weights, 3 could map to 13 and 6 to 31 depending on
135+
// whether adjacency weighting takes place
136+
BOOST_REQUIRE_EQUAL(1, weighter.minMatchingWeight(1));
137+
BOOST_REQUIRE_EQUAL(1, weighter.maxMatchingWeight(1));
138+
BOOST_REQUIRE_EQUAL(3, weighter.minMatchingWeight(3));
139+
BOOST_REQUIRE_EQUAL(13, weighter.maxMatchingWeight(3));
140+
BOOST_REQUIRE_EQUAL(6, weighter.minMatchingWeight(6));
141+
BOOST_REQUIRE_EQUAL(31, weighter.maxMatchingWeight(6));
142+
BOOST_REQUIRE_EQUAL(3, weighter.minMatchingWeight(13));
143+
BOOST_REQUIRE_EQUAL(13, weighter.maxMatchingWeight(13));
144+
BOOST_REQUIRE_EQUAL(6, weighter.minMatchingWeight(31));
145+
BOOST_REQUIRE_EQUAL(31, weighter.maxMatchingWeight(31));
146+
}
147+
98148
// Disabled because it doesn't assert anything
99149
// Can be run on an ad hoc basis if performance is of interest
100150
BOOST_AUTO_TEST_CASE(testPerformance, *boost::unit_test::disabled()) {
@@ -104,8 +154,8 @@ BOOST_AUTO_TEST_CASE(testPerformance, *boost::unit_test::disabled()) {
104154
LOG_INFO(<< "Starting word dictionary throughput test at "
105155
<< ml::core::CTimeUtils::toTimeString(start));
106156

107-
static const size_t TEST_SIZE(100000);
108-
for (size_t count = 0; count < TEST_SIZE; ++count) {
157+
static const std::size_t TEST_SIZE(100000);
158+
for (std::size_t count = 0; count < TEST_SIZE; ++count) {
109159
dict.isInDictionary("hello");
110160
dict.isInDictionary("Hello");
111161
dict.isInDictionary("HELLO");

0 commit comments

Comments (0)