forked from scylladb/scylladb
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy paththrift_validation.hh
643 lines (555 loc) · 30.1 KB
/
thrift_validation.hh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
/*
* Copyright (C) 2015-present ScyllaDB
*
* Modified by ScyllaDB
*/
/*
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
*/
#pragma once
#include "schema.hh"
#include "bytes.hh"
#include "Cassandra.h"
using namespace ::cassandra;
#if 0
import java.nio.ByteBuffer;
import java.util.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql3.ColumnIdentifier;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.*;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.db.filter.NamesQueryFilter;
import org.apache.cassandra.db.filter.SliceQueryFilter;
import org.apache.cassandra.db.index.SecondaryIndexManager;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.ColumnToCollectionType;
import org.apache.cassandra.db.marshal.UTF8Type;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.serializers.MarshalException;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.FBUtilities;
#endif
/**
* This has a lot of building blocks for CassandraServer to call to make sure it has valid input
* -- ensuring column names conform to the declared comparator, for instance.
*
* The methods here mostly try to do just one part of the validation so they can be combined
* for different needs -- supercolumns vs regular, range slices vs named, batch vs single-column.
* (ValidateColumnPath is the main exception in that it includes keyspace and CF validation.)
*/
namespace thrift_validation {
#if 0
private static final Logger logger = LoggerFactory.getLogger(ThriftValidation.class);
#endif
// Validate a Thrift partition key against the table schema (definitions live
// in the corresponding .cc). NOTE(review): presumably checks for empty keys
// and the maximum key length, as in the original Java — confirm in the .cc.
void validate_key(const schema& s, const bytes_view& key);
// Reject modifications targeting the system keyspace; mirrors the Java
// validateKeyspaceNotSystem() kept in the #if 0 reference block below.
void validate_keyspace_not_system(const std::string& keyspace);
// Validate a Thrift keyspace definition (KsDef from Cassandra.h).
void validate_ks_def(const KsDef& ks_def);
// Validate a Thrift column-family definition (CfDef from Cassandra.h).
void validate_cf_def(const CfDef& cf_def);
// Validate a single column name (NOTE(review): likely non-empty and within
// the maximum name length, per the Java reference below — confirm).
void validate_column_name(const std::string& name);
// Validate each name in the list; convenience wrapper over
// validate_column_name().  NOTE(review): wrapper relationship assumed — confirm.
void validate_column_names(const std::vector<std::string>& names);
// Validate a Thrift Column's data against the column's declared definition
// (value/timestamp/ttl checks; see validateColumnData/validateTtl in the
// reference code below for the original semantics).
void validate_column(const Column& col, const column_definition& def);
#if 0
public static void validateKeyspace(String keyspaceName) throws KeyspaceNotDefinedException
{
if (!Schema.instance.getKeyspaces().contains(keyspaceName))
{
throw new KeyspaceNotDefinedException("Keyspace " + keyspaceName + " does not exist");
}
}
public static CFMetaData validateColumnFamily(String keyspaceName, String cfName, boolean isCommutativeOp) throws org.apache.cassandra.exceptions.InvalidRequestException
{
CFMetaData metadata = validateColumnFamily(keyspaceName, cfName);
if (isCommutativeOp)
{
if (!metadata.isCounter())
throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for non commutative table " + cfName);
}
else
{
if (metadata.isCounter())
throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for commutative table " + cfName);
}
return metadata;
}
/**
* validates all parts of the path to the column, including the column name
*/
public static void validateColumnPath(CFMetaData metadata, ColumnPath column_path) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (metadata.cfType == ColumnFamilyType.Standard)
{
if (column_path.super_column != null)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn parameter is invalid for standard CF " + metadata.cfName);
}
if (column_path.column == null)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("column parameter is not optional for standard CF " + metadata.cfName);
}
}
else
{
if (column_path.super_column == null)
throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn parameter is not optional for super CF " + metadata.cfName);
}
if (column_path.column != null)
{
validateColumnNames(metadata, column_path.super_column, Arrays.asList(column_path.column));
}
if (column_path.super_column != null)
{
validateColumnNames(metadata, (ByteBuffer)null, Arrays.asList(column_path.super_column));
}
}
public static void validateColumnParent(CFMetaData metadata, ColumnParent column_parent) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (metadata.cfType == ColumnFamilyType.Standard)
{
if (column_parent.super_column != null)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("table alone is required for standard CF " + metadata.cfName);
}
}
if (column_parent.super_column != null)
{
validateColumnNames(metadata, (ByteBuffer)null, Arrays.asList(column_parent.super_column));
}
}
// column_path_or_parent is a ColumnPath for remove, where the "column" is optional even for a standard CF
static void validateColumnPathOrParent(CFMetaData metadata, ColumnPath column_path_or_parent) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (metadata.cfType == ColumnFamilyType.Standard)
{
if (column_path_or_parent.super_column != null)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn may not be specified for standard CF " + metadata.cfName);
}
}
if (metadata.cfType == ColumnFamilyType.Super)
{
if (column_path_or_parent.super_column == null && column_path_or_parent.column != null)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("A column cannot be specified without specifying a super column for removal on super CF "
+ metadata.cfName);
}
}
if (column_path_or_parent.column != null)
{
validateColumnNames(metadata, column_path_or_parent.super_column, Arrays.asList(column_path_or_parent.column));
}
if (column_path_or_parent.super_column != null)
{
validateColumnNames(metadata, (ByteBuffer)null, Arrays.asList(column_path_or_parent.super_column));
}
}
/**
* Validates the column names but not the parent path or data
*/
private static void validateColumnNames(CFMetaData metadata, ByteBuffer superColumnName, Iterable<ByteBuffer> column_names)
throws org.apache.cassandra.exceptions.InvalidRequestException
{
int maxNameLength = Cell.MAX_NAME_LENGTH;
if (superColumnName != null)
{
if (superColumnName.remaining() > maxNameLength)
throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn name length must not be greater than " + maxNameLength);
if (superColumnName.remaining() == 0)
throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn name must not be empty");
if (metadata.cfType == ColumnFamilyType.Standard)
throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn specified to table " + metadata.cfName + " containing normal columns");
}
AbstractType<?> comparator = SuperColumns.getComparatorFor(metadata, superColumnName);
boolean isCQL3Table = !metadata.isThriftCompatible();
for (ByteBuffer name : column_names)
{
if (name.remaining() > maxNameLength)
throw new org.apache.cassandra.exceptions.InvalidRequestException("column name length must not be greater than " + maxNameLength);
if (name.remaining() == 0)
throw new org.apache.cassandra.exceptions.InvalidRequestException("column name must not be empty");
try
{
comparator.validate(name);
}
catch (MarshalException e)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException(e.getMessage());
}
if (isCQL3Table)
{
// CQL3 table don't support having only part of their composite column names set
Composite composite = metadata.comparator.fromByteBuffer(name);
int minComponents = metadata.comparator.clusteringPrefixSize() + 1;
if (composite.size() < minComponents)
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Not enough components (found %d but %d expected) for column name since %s is a CQL3 table",
composite.size(), minComponents, metadata.cfName));
// Furthermore, the column name must be a declared one.
int columnIndex = metadata.comparator.clusteringPrefixSize();
ByteBuffer CQL3ColumnName = composite.get(columnIndex);
if (!CQL3ColumnName.hasRemaining())
continue; // Row marker, ok
ColumnIdentifier columnId = new ColumnIdentifier(CQL3ColumnName, metadata.comparator.subtype(columnIndex));
if (metadata.getColumnDefinition(columnId) == null)
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Invalid cell for CQL3 table %s. The CQL3 column component (%s) does not correspond to a defined CQL3 column",
metadata.cfName, columnId));
// On top of that, if we have a collection component, he (CQL3) column must be a collection
if (metadata.comparator.hasCollections() && composite.size() == metadata.comparator.size())
{
ColumnToCollectionType collectionType = metadata.comparator.collectionType();
if (!collectionType.defined.containsKey(CQL3ColumnName))
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Invalid collection component, %s is not a collection", UTF8Type.instance.getString(CQL3ColumnName)));
}
}
}
}
public static void validateColumnNames(CFMetaData metadata, ColumnParent column_parent, Iterable<ByteBuffer> column_names) throws org.apache.cassandra.exceptions.InvalidRequestException
{
validateColumnNames(metadata, column_parent.super_column, column_names);
}
public static void validateRange(CFMetaData metadata, ColumnParent column_parent, SliceRange range) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (range.count < 0)
throw new org.apache.cassandra.exceptions.InvalidRequestException("get_slice requires non-negative count");
int maxNameLength = Cell.MAX_NAME_LENGTH;
if (range.start.remaining() > maxNameLength)
throw new org.apache.cassandra.exceptions.InvalidRequestException("range start length cannot be larger than " + maxNameLength);
if (range.finish.remaining() > maxNameLength)
throw new org.apache.cassandra.exceptions.InvalidRequestException("range finish length cannot be larger than " + maxNameLength);
AbstractType<?> comparator = SuperColumns.getComparatorFor(metadata, column_parent.super_column);
try
{
comparator.validate(range.start);
comparator.validate(range.finish);
}
catch (MarshalException e)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException(e.getMessage());
}
Comparator<ByteBuffer> orderedComparator = range.isReversed() ? comparator.reverseComparator : comparator;
if (range.start.remaining() > 0
&& range.finish.remaining() > 0
&& orderedComparator.compare(range.start, range.finish) > 0)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("range finish must come after start in the order of traversal");
}
}
public static void validateColumnOrSuperColumn(CFMetaData metadata, ColumnOrSuperColumn cosc)
throws org.apache.cassandra.exceptions.InvalidRequestException
{
boolean isCommutative = metadata.isCounter();
int nulls = 0;
if (cosc.column == null) nulls++;
if (cosc.super_column == null) nulls++;
if (cosc.counter_column == null) nulls++;
if (cosc.counter_super_column == null) nulls++;
if (nulls != 3)
throw new org.apache.cassandra.exceptions.InvalidRequestException("ColumnOrSuperColumn must have one (and only one) of column, super_column, counter and counter_super_column");
if (cosc.column != null)
{
if (isCommutative)
throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for commutative table " + metadata.cfName);
validateTtl(cosc.column);
validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column((ByteBuffer)null).setColumn(cosc.column.name));
validateColumnData(metadata, null, cosc.column);
}
if (cosc.super_column != null)
{
if (isCommutative)
throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for commutative table " + metadata.cfName);
for (Column c : cosc.super_column.columns)
{
validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column(cosc.super_column.name).setColumn(c.name));
validateColumnData(metadata, cosc.super_column.name, c);
}
}
if (cosc.counter_column != null)
{
if (!isCommutative)
throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for non commutative table " + metadata.cfName);
validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column((ByteBuffer)null).setColumn(cosc.counter_column.name));
}
if (cosc.counter_super_column != null)
{
if (!isCommutative)
throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for non commutative table " + metadata.cfName);
for (CounterColumn c : cosc.counter_super_column.columns)
validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column(cosc.counter_super_column.name).setColumn(c.name));
}
}
private static void validateTtl(Column column) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (column.isSetTtl())
{
if (column.ttl <= 0)
throw new org.apache.cassandra.exceptions.InvalidRequestException("ttl must be positive");
if (column.ttl > ExpiringCell.MAX_TTL)
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("ttl is too large. requested (%d) maximum (%d)", column.ttl, ExpiringCell.MAX_TTL));
}
else
{
// if it's not set, then it should be zero -- here we are just checking to make sure Thrift doesn't change that contract with us.
assert column.ttl == 0;
}
}
public static void validateMutation(CFMetaData metadata, Mutation mut)
throws org.apache.cassandra.exceptions.InvalidRequestException
{
ColumnOrSuperColumn cosc = mut.column_or_supercolumn;
Deletion del = mut.deletion;
int nulls = 0;
if (cosc == null) nulls++;
if (del == null) nulls++;
if (nulls != 1)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("mutation must have one and only one of column_or_supercolumn or deletion");
}
if (cosc != null)
{
validateColumnOrSuperColumn(metadata, cosc);
}
else
{
validateDeletion(metadata, del);
}
}
public static void validateDeletion(CFMetaData metadata, Deletion del) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (del.super_column != null)
validateColumnNames(metadata, (ByteBuffer)null, Arrays.asList(del.super_column));
if (del.predicate != null)
validateSlicePredicate(metadata, del.super_column, del.predicate);
if (metadata.cfType == ColumnFamilyType.Standard && del.super_column != null)
{
String msg = String.format("Deletion of super columns is not possible on a standard table (KeySpace=%s Table=%s Deletion=%s)", metadata.ksName, metadata.cfName, del);
throw new org.apache.cassandra.exceptions.InvalidRequestException(msg);
}
if (metadata.isCounter())
{
// forcing server timestamp even if a timestamp was set for coherence with other counter operation
del.timestamp = System.currentTimeMillis();
}
else if (!del.isSetTimestamp())
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("Deletion timestamp is not optional for non commutative table " + metadata.cfName);
}
}
public static void validateSlicePredicate(CFMetaData metadata, ByteBuffer scName, SlicePredicate predicate) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (predicate.column_names == null && predicate.slice_range == null)
throw new org.apache.cassandra.exceptions.InvalidRequestException("A SlicePredicate must be given a list of Columns, a SliceRange, or both");
if (predicate.slice_range != null)
validateRange(metadata, new ColumnParent(metadata.cfName).setSuper_column(scName), predicate.slice_range);
if (predicate.column_names != null)
validateColumnNames(metadata, scName, predicate.column_names);
}
/**
* Validates the data part of the column (everything in the column object but the name, which is assumed to be valid)
*/
public static void validateColumnData(CFMetaData metadata, ByteBuffer scName, Column column) throws org.apache.cassandra.exceptions.InvalidRequestException
{
validateTtl(column);
if (!column.isSetValue())
throw new org.apache.cassandra.exceptions.InvalidRequestException("Column value is required");
if (!column.isSetTimestamp())
throw new org.apache.cassandra.exceptions.InvalidRequestException("Column timestamp is required");
CellName cn = scName == null
? metadata.comparator.cellFromByteBuffer(column.name)
: metadata.comparator.makeCellName(scName, column.name);
try
{
AbstractType<?> validator = metadata.getValueValidator(cn);
if (validator != null)
validator.validate(column.value);
}
catch (MarshalException me)
{
if (logger.isDebugEnabled())
logger.debug("rejecting invalid value {}", ByteBufferUtil.bytesToHex(summarize(column.value)));
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("(%s) [%s][%s][%s] failed validation",
me.getMessage(),
metadata.ksName,
metadata.cfName,
(SuperColumns.getComparatorFor(metadata, scName != null)).getString(column.name)));
}
// Indexed column values cannot be larger than 64K. See CASSANDRA-3057/4240 for more details
if (!Keyspace.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager.validate(asDBColumn(cn, column)))
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Can't index column value of size %d for index %s in CF %s of KS %s",
column.value.remaining(),
metadata.getColumnDefinition(cn).getIndexName(),
metadata.cfName,
metadata.ksName));
}
private static Cell asDBColumn(CellName name, Column column)
{
if (column.ttl <= 0)
return new BufferCell(name, column.value, column.timestamp);
else
return new BufferExpiringCell(name, column.value, column.timestamp, column.ttl);
}
/**
* Return, at most, the first 64K of the buffer. This avoids very large column values being
* logged in their entirety.
*/
private static ByteBuffer summarize(ByteBuffer buffer)
{
int MAX = Short.MAX_VALUE;
if (buffer.remaining() <= MAX)
return buffer;
return (ByteBuffer) buffer.slice().limit(buffer.position() + MAX);
}
public static void validatePredicate(CFMetaData metadata, ColumnParent column_parent, SlicePredicate predicate)
throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (predicate.column_names == null && predicate.slice_range == null)
throw new org.apache.cassandra.exceptions.InvalidRequestException("predicate column_names and slice_range may not both be null");
if (predicate.column_names != null && predicate.slice_range != null)
throw new org.apache.cassandra.exceptions.InvalidRequestException("predicate column_names and slice_range may not both be present");
if (predicate.getSlice_range() != null)
validateRange(metadata, column_parent, predicate.slice_range);
else
validateColumnNames(metadata, column_parent, predicate.column_names);
}
public static void validateKeyRange(CFMetaData metadata, ByteBuffer superColumn, KeyRange range) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if ((range.start_key == null) == (range.start_token == null)
|| (range.end_key == null) == (range.end_token == null))
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("exactly one each of {start key, start token} and {end key, end token} must be specified");
}
// (key, token) is supported (for wide-row CFRR) but not (token, key)
if (range.start_token != null && range.end_key != null)
throw new org.apache.cassandra.exceptions.InvalidRequestException("start token + end key is not a supported key range");
IPartitioner p = StorageService.getPartitioner();
if (range.start_key != null && range.end_key != null)
{
Token startToken = p.getToken(range.start_key);
Token endToken = p.getToken(range.end_key);
if (startToken.compareTo(endToken) > 0 && !endToken.isMinimum())
{
if (p.preservesOrder())
throw new org.apache.cassandra.exceptions.InvalidRequestException("start key must sort before (or equal to) finish key in your partitioner!");
else
throw new org.apache.cassandra.exceptions.InvalidRequestException("start key's token sorts after end key's token. this is not allowed; you probably should not specify end key at all except with an ordered partitioner");
}
}
else if (range.start_key != null && range.end_token != null)
{
// start_token/end_token can wrap, but key/token should not
RowPosition stop = p.getTokenFactory().fromString(range.end_token).maxKeyBound();
if (RowPosition.ForKey.get(range.start_key, p).compareTo(stop) > 0 && !stop.isMinimum())
throw new org.apache.cassandra.exceptions.InvalidRequestException("Start key's token sorts after end token");
}
validateFilterClauses(metadata, range.row_filter);
if (!isEmpty(range.row_filter) && superColumn != null)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("super columns are not supported for indexing");
}
if (range.count <= 0)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("maxRows must be positive");
}
}
private static boolean isEmpty(List<IndexExpression> clause)
{
return clause == null || clause.isEmpty();
}
public static void validateIndexClauses(CFMetaData metadata, IndexClause index_clause)
throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (index_clause.expressions.isEmpty())
throw new org.apache.cassandra.exceptions.InvalidRequestException("index clause list may not be empty");
if (!validateFilterClauses(metadata, index_clause.expressions))
throw new org.apache.cassandra.exceptions.InvalidRequestException("No indexed columns present in index clause with operator EQ");
}
// return true if index_clause contains an indexed columns with operator EQ
public static boolean validateFilterClauses(CFMetaData metadata, List<IndexExpression> index_clause)
throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (isEmpty(index_clause))
// no filter to apply
return false;
SecondaryIndexManager idxManager = Keyspace.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager;
AbstractType<?> nameValidator = SuperColumns.getComparatorFor(metadata, null);
boolean isIndexed = false;
for (IndexExpression expression : index_clause)
{
try
{
nameValidator.validate(expression.column_name);
}
catch (MarshalException me)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("[%s]=[%s] failed name validation (%s)",
ByteBufferUtil.bytesToHex(expression.column_name),
ByteBufferUtil.bytesToHex(expression.value),
me.getMessage()));
}
if (expression.value.remaining() > 0xFFFF)
throw new org.apache.cassandra.exceptions.InvalidRequestException("Index expression values may not be larger than 64K");
CellName name = metadata.comparator.cellFromByteBuffer(expression.column_name);
AbstractType<?> valueValidator = metadata.getValueValidator(name);
try
{
valueValidator.validate(expression.value);
}
catch (MarshalException me)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("[%s]=[%s] failed value validation (%s)",
ByteBufferUtil.bytesToHex(expression.column_name),
ByteBufferUtil.bytesToHex(expression.value),
me.getMessage()));
}
isIndexed |= (expression.op == IndexOperator.EQ) && idxManager.indexes(name);
}
return isIndexed;
}
public static void validateKeyspaceNotYetExisting(String newKsName) throws org.apache.cassandra.exceptions.InvalidRequestException
{
// keyspace names must be unique case-insensitively because the keyspace name becomes the directory
// where we store CF sstables. Names that differ only in case would thus cause problems on
// case-insensitive filesystems (NTFS, most installations of HFS+).
for (String ksName : Schema.instance.getKeyspaces())
{
if (ksName.equalsIgnoreCase(newKsName))
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Keyspace names must be case-insensitively unique (\"%s\" conflicts with \"%s\")",
newKsName,
ksName));
}
}
public static void validateKeyspaceNotSystem(String modifiedKeyspace) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (modifiedKeyspace.equalsIgnoreCase(SystemKeyspace.NAME))
throw new org.apache.cassandra.exceptions.InvalidRequestException("system keyspace is not user-modifiable");
}
public static IDiskAtomFilter asIFilter(SlicePredicate sp, CFMetaData metadata, ByteBuffer superColumn)
{
SliceRange sr = sp.slice_range;
IDiskAtomFilter filter;
CellNameType comparator = metadata.isSuper()
? new SimpleDenseCellNameType(metadata.comparator.subtype(superColumn == null ? 0 : 1))
: metadata.comparator;
if (sr == null)
{
SortedSet<CellName> ss = new TreeSet<CellName>(comparator);
for (ByteBuffer bb : sp.column_names)
ss.add(comparator.cellFromByteBuffer(bb));
filter = new NamesQueryFilter(ss);
}
else
{
filter = new SliceQueryFilter(comparator.fromByteBuffer(sr.start),
comparator.fromByteBuffer(sr.finish),
sr.reversed,
sr.count);
}
if (metadata.isSuper())
filter = SuperColumns.fromSCFilter(metadata.comparator, superColumn, filter);
return filter;
}
}
#endif
}