/* CPP Library - lexical analysis.
Copyright (C) 2000-2019 Free Software Foundation, Inc.
Contributed by Per Bothner, 1994-95.
Based on CCCP program by Paul Rubin, June 1986
Adapted to ANSI C, Richard Stallman, Jan 1987
Broken out to separate file, Zack Weinberg, Mar 2000
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "cpplib.h"
#include "internal.h"
enum spell_type
{
SPELL_OPERATOR = 0,
SPELL_IDENT,
SPELL_LITERAL,
SPELL_NONE
};
struct token_spelling
{
enum spell_type category;
const unsigned char *name;
};
static const unsigned char *const digraph_spellings[] =
{ UC"%:", UC"%:%:", UC"<:", UC":>", UC"<%", UC"%>" };
#define OP(e, s) { SPELL_OPERATOR, UC s },
#define TK(e, s) { SPELL_ ## s, UC #e },
static const struct token_spelling token_spellings[N_TTYPES] = { TTYPE_TABLE };
#undef OP
#undef TK
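/* TTYPE_TABLE is the X-macro list of token types from cpplib.h: each
OP (e, s) entry expands to an operator spelling such as
{ SPELL_OPERATOR, UC "+" }, and each TK (e, s) entry to its spelling
category plus the stringized enumerator name, so this table stays in
lock-step with enum cpp_ttype. */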
#define TOKEN_SPELL(token) (token_spellings[(token)->type].category)
#define TOKEN_NAME(token) (token_spellings[(token)->type].name)
static void add_line_note (cpp_buffer *, const uchar *, unsigned int);
static int skip_line_comment (cpp_reader *);
static void skip_whitespace (cpp_reader *, cppchar_t);
static void lex_string (cpp_reader *, cpp_token *, const uchar *);
static void save_comment (cpp_reader *, cpp_token *, const uchar *, cppchar_t);
static void store_comment (cpp_reader *, cpp_token *);
static void create_literal (cpp_reader *, cpp_token *, const uchar *,
unsigned int, enum cpp_ttype);
static bool warn_in_comment (cpp_reader *, _cpp_line_note *);
static int name_p (cpp_reader *, const cpp_string *);
static tokenrun *next_tokenrun (tokenrun *);
static _cpp_buff *new_buff (size_t);
/* Utility routine:
Compares the token TOKEN to the NUL-terminated string STRING.
TOKEN must be a CPP_NAME. Returns 1 for equal, 0 for unequal. */
int
cpp_ideq (const cpp_token *token, const char *string)
{
if (token->type != CPP_NAME)
return 0;
return !ustrcmp (NODE_NAME (token->val.node.node), (const uchar *) string);
}
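/* Example use (hypothetical caller):
if (cpp_ideq (token, "once"))
... handle an identifier spelled "once" ...
No CPP_NAME pre-check is needed; other token types simply compare
unequal. */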
/* Record a note TYPE at byte POS into the current cleaned logical
line. */
static void
add_line_note (cpp_buffer *buffer, const uchar *pos, unsigned int type)
{
if (buffer->notes_used == buffer->notes_cap)
{
buffer->notes_cap = buffer->notes_cap * 2 + 200;
buffer->notes = XRESIZEVEC (_cpp_line_note, buffer->notes,
buffer->notes_cap);
}
buffer->notes[buffer->notes_used].pos = pos;
buffer->notes[buffer->notes_used].type = type;
buffer->notes_used++;
}
/* Fast path to find line special characters using optimized character
scanning algorithms. Anything complicated falls back to the slow
path below. Since this loop is very hot, it's worth doing these kinds
of optimizations.
One of the paths through the ifdefs should provide
const uchar *search_line_fast (const uchar *s, const uchar *end);
Between S and END, search for \n, \r, \\, ?. Return a pointer to
the found character.
Note that the last character of the buffer is *always* a newline,
as forced by _cpp_convert_input. This fact can be used to avoid
explicitly looking for the end of the buffer. */
/* Configure gives us an ifdef test. */
#ifndef WORDS_BIGENDIAN
#define WORDS_BIGENDIAN 0
#endif
/* We'd like the largest integer that fits into a register. There's nothing
in <stdint.h> that gives us that. For most hosts this is unsigned long,
but MS decided on an LLP64 model. Thankfully when building with GCC we
can get the "real" word size. */
#ifdef __GNUC__
typedef unsigned int word_type __attribute__((__mode__(__word__)));
#else
typedef unsigned long word_type;
#endif
/* The code below is only expecting sizes 4 or 8.
Die at compile-time if this expectation is violated. */
typedef char check_word_type_size
[(sizeof(word_type) == 8 || sizeof(word_type) == 4) * 2 - 1];
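/* The array above has size 1 when sizeof (word_type) is 4 or 8 and
the invalid size -1 otherwise, so an unexpected host word size fails
to compile rather than miscomputing below. */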
/* Return VAL with the first N bytes forced to values that won't match one
of the interesting characters. Note that NUL is not interesting. */
static inline word_type
acc_char_mask_misalign (word_type val, unsigned int n)
{
word_type mask = -1;
if (WORDS_BIGENDIAN)
mask >>= n * 8;
else
mask <<= n * 8;
return val & mask;
}
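/* For illustration, with 4-byte words on a little-endian host a
misalignment of 2 gives MASK == 0xffff0000, zeroing the two bytes
that precede the true start of the buffer. Zeroed bytes can never
match, since NUL is not one of the characters searched for. */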
/* Return X replicated to all byte positions within WORD_TYPE. */
static inline word_type
acc_char_replicate (uchar x)
{
word_type ret;
ret = (x << 24) | (x << 16) | (x << 8) | x;
if (sizeof(word_type) == 8)
ret = (ret << 16 << 16) | ret;
return ret;
}
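/* For example, acc_char_replicate ('\n') yields 0x0a0a0a0a with
4-byte words and 0x0a0a0a0a0a0a0a0a with 8-byte words. */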
/* Return non-zero if some byte of VAL is (probably) C. */
static inline word_type
acc_char_cmp (word_type val, word_type c)
{
#if defined(__GNUC__) && defined(__alpha__)
/* We can get exact results using a compare-bytes instruction.
Get (val == c) via (0 >= (val ^ c)). */
return __builtin_alpha_cmpbge (0, val ^ c);
#else
word_type magic = 0x7efefefeU;
if (sizeof(word_type) == 8)
magic = (magic << 16 << 16) | 0xfefefefeU;
magic |= 1;
val ^= c;
return ((val + magic) ^ ~val) & ~magic;
#endif
}
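/* A sketch of the generic path above: VAL ^= C turns every byte equal
to C into zero. Roughly, adding MAGIC (0x7efefeff for 4-byte words)
carries out of each non-zero byte but not out of a zero byte, and
XORing with ~VAL exposes the positions where the carry chain broke.
Carries arriving from lower bytes can fake a match, which is why a
hit is only "probable" and acc_char_index re-checks byte by byte. */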
/* Given that the result of acc_char_cmp is non-zero, return the index of
the found character. If this was a false positive, return -1. */
static inline int
acc_char_index (word_type cmp ATTRIBUTE_UNUSED,
word_type val ATTRIBUTE_UNUSED)
{
#if defined(__GNUC__) && defined(__alpha__) && !WORDS_BIGENDIAN
/* The cmpbge instruction sets *bits* of the result corresponding to
matches in the bytes with no false positives. */
return __builtin_ctzl (cmp);
#else
unsigned int i;
/* ??? It would be nice to force unrolling here,
and have all of these constants folded. */
for (i = 0; i < sizeof(word_type); ++i)
{
uchar c;
if (WORDS_BIGENDIAN)
c = (val >> (sizeof(word_type) - i - 1) * 8) & 0xff;
else
c = (val >> i * 8) & 0xff;
if (c == '\n' || c == '\r' || c == '\\' || c == '?')
return i;
}
return -1;
#endif
}
/* A version of the fast scanner using bit fiddling techniques.
For 32-bit words, one would normally perform 16 comparisons and
16 branches. With this algorithm one performs 24 arithmetic
operations and one branch. Whether this is faster with a 32-bit
word size is going to be somewhat system dependent.
For 64-bit words, we eliminate twice the number of comparisons
and branches without increasing the number of arithmetic operations.
It's almost certainly going to be a win with 64-bit word size. */
static const uchar * search_line_acc_char (const uchar *, const uchar *)
ATTRIBUTE_UNUSED;
static const uchar *
search_line_acc_char (const uchar *s, const uchar *end ATTRIBUTE_UNUSED)
{
const word_type repl_nl = acc_char_replicate ('\n');
const word_type repl_cr = acc_char_replicate ('\r');
const word_type repl_bs = acc_char_replicate ('\\');
const word_type repl_qm = acc_char_replicate ('?');
unsigned int misalign;
const word_type *p;
word_type val, t;
/* Align the buffer. Mask out any bytes from before the beginning. */
p = (word_type *)((uintptr_t)s & -sizeof(word_type));
val = *p;
misalign = (uintptr_t)s & (sizeof(word_type) - 1);
if (misalign)
val = acc_char_mask_misalign (val, misalign);
/* Main loop. */
while (1)
{
t = acc_char_cmp (val, repl_nl);
t |= acc_char_cmp (val, repl_cr);
t |= acc_char_cmp (val, repl_bs);
t |= acc_char_cmp (val, repl_qm);
if (__builtin_expect (t != 0, 0))
{
int i = acc_char_index (t, val);
if (i >= 0)
return (const uchar *)p + i;
}
val = *++p;
}
}
/* Disable on Solaris 2/x86 until the following problem can be properly
autoconfed:
The Solaris 10+ assembler tags objects with the instruction set
extensions used, so SSE4.2 executables cannot run on machines that
don't support that extension. */
#if (GCC_VERSION >= 4005) && (__GNUC__ >= 5 || !defined(__PIC__)) && (defined(__i386__) || defined(__x86_64__)) && !(defined(__sun__) && defined(__svr4__))
/* Replicated character data to be shared between implementations.
Recall that outside of a context with vector support we can't
define compatible vector types, therefore these are all defined
in terms of raw characters. */
static const char repl_chars[4][16] __attribute__((aligned(16))) = {
{ '\n', '\n', '\n', '\n', '\n', '\n', '\n', '\n',
'\n', '\n', '\n', '\n', '\n', '\n', '\n', '\n' },
{ '\r', '\r', '\r', '\r', '\r', '\r', '\r', '\r',
'\r', '\r', '\r', '\r', '\r', '\r', '\r', '\r' },
{ '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\',
'\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' },
{ '?', '?', '?', '?', '?', '?', '?', '?',
'?', '?', '?', '?', '?', '?', '?', '?' },
};
/* A version of the fast scanner using MMX vectorized byte compare insns.
This uses the PMOVMSKB instruction which was introduced with "MMX2",
which was packaged into SSE1; it is also present in the AMD MMX
extension. Mark the function as using "sse" so that we emit a real
"emms" instruction, rather than the 3dNOW "femms" instruction. */
static const uchar *
#ifndef __SSE__
__attribute__((__target__("sse")))
#endif
search_line_mmx (const uchar *s, const uchar *end ATTRIBUTE_UNUSED)
{
typedef char v8qi __attribute__ ((__vector_size__ (8)));
typedef int __m64 __attribute__ ((__vector_size__ (8), __may_alias__));
const v8qi repl_nl = *(const v8qi *)repl_chars[0];
const v8qi repl_cr = *(const v8qi *)repl_chars[1];
const v8qi repl_bs = *(const v8qi *)repl_chars[2];
const v8qi repl_qm = *(const v8qi *)repl_chars[3];
unsigned int misalign, found, mask;
const v8qi *p;
v8qi data, t, c;
/* Align the source pointer. While MMX doesn't generate unaligned data
faults, this allows us to safely scan to the end of the buffer without
reading beyond the end of the last page. */
misalign = (uintptr_t)s & 7;
p = (const v8qi *)((uintptr_t)s & -8);
data = *p;
/* Create a mask for the bytes that are valid within the first
8-byte block. The idea here is that the AND with the mask
within the loop is "free", since we need some AND or TEST
insn in order to set the flags for the branch anyway. */
mask = -1u << misalign;
/* Main loop processing 8 bytes at a time. */
goto start;
do
{
data = *++p;
mask = -1;
start:
t = __builtin_ia32_pcmpeqb(data, repl_nl);
c = __builtin_ia32_pcmpeqb(data, repl_cr);
t = (v8qi) __builtin_ia32_por ((__m64)t, (__m64)c);
c = __builtin_ia32_pcmpeqb(data, repl_bs);
t = (v8qi) __builtin_ia32_por ((__m64)t, (__m64)c);
c = __builtin_ia32_pcmpeqb(data, repl_qm);
t = (v8qi) __builtin_ia32_por ((__m64)t, (__m64)c);
found = __builtin_ia32_pmovmskb (t);
found &= mask;
}
while (!found);
__builtin_ia32_emms ();
/* FOUND contains 1 in bits for which we matched a relevant
character. Conversion to the byte index is trivial. */
found = __builtin_ctz(found);
return (const uchar *)p + found;
}
/* A version of the fast scanner using SSE2 vectorized byte compare insns. */
static const uchar *
#ifndef __SSE2__
__attribute__((__target__("sse2")))
#endif
search_line_sse2 (const uchar *s, const uchar *end ATTRIBUTE_UNUSED)
{
typedef char v16qi __attribute__ ((__vector_size__ (16)));
const v16qi repl_nl = *(const v16qi *)repl_chars[0];
const v16qi repl_cr = *(const v16qi *)repl_chars[1];
const v16qi repl_bs = *(const v16qi *)repl_chars[2];
const v16qi repl_qm = *(const v16qi *)repl_chars[3];
unsigned int misalign, found, mask;
const v16qi *p;
v16qi data, t;
/* Align the source pointer. */
misalign = (uintptr_t)s & 15;
p = (const v16qi *)((uintptr_t)s & -16);
data = *p;
/* Create a mask for the bytes that are valid within the first
16-byte block. The idea here is that the AND with the mask
within the loop is "free", since we need some AND or TEST
insn in order to set the flags for the branch anyway. */
mask = -1u << misalign;
/* Main loop processing 16 bytes at a time. */
goto start;
do
{
data = *++p;
mask = -1;
start:
t = __builtin_ia32_pcmpeqb128(data, repl_nl);
t |= __builtin_ia32_pcmpeqb128(data, repl_cr);
t |= __builtin_ia32_pcmpeqb128(data, repl_bs);
t |= __builtin_ia32_pcmpeqb128(data, repl_qm);
found = __builtin_ia32_pmovmskb128 (t);
found &= mask;
}
while (!found);
/* FOUND contains 1 in bits for which we matched a relevant
character. Conversion to the byte index is trivial. */
found = __builtin_ctz(found);
return (const uchar *)p + found;
}
#ifdef HAVE_SSE4
/* A version of the fast scanner using SSE 4.2 vectorized string insns. */
static const uchar *
#ifndef __SSE4_2__
__attribute__((__target__("sse4.2")))
#endif
search_line_sse42 (const uchar *s, const uchar *end)
{
typedef char v16qi __attribute__ ((__vector_size__ (16)));
static const v16qi search = { '\n', '\r', '?', '\\' };
uintptr_t si = (uintptr_t)s;
uintptr_t index;
/* Check for unaligned input. */
if (si & 15)
{
v16qi sv;
if (__builtin_expect (end - s < 16, 0)
&& __builtin_expect ((si & 0xfff) > 0xff0, 0))
{
/* There are fewer than 16 bytes left in the buffer, and fewer
than 16 bytes left on the page. Reading 16 bytes at this
point might generate a spurious page fault. Defer to the
SSE2 implementation, which already handles alignment. */
return search_line_sse2 (s, end);
}
/* ??? The builtin doesn't understand that the PCMPESTRI read from
memory need not be aligned. */
sv = __builtin_ia32_loaddqu ((const char *) s);
index = __builtin_ia32_pcmpestri128 (search, 4, sv, 16, 0);
if (__builtin_expect (index < 16, 0))
goto found;
/* Advance the pointer to an aligned address. We will re-scan a
few bytes, but we no longer need to worry about reading past the
end of a page, since we're guaranteed a match. */
s = (const uchar *)((si + 15) & -16);
}
/* Main loop, processing 16 bytes at a time. */
#ifdef __GCC_ASM_FLAG_OUTPUTS__
while (1)
{
char f;
/* By using inline assembly instead of the builtin,
we can use the result, as well as the flags set. */
__asm ("%vpcmpestri\t$0, %2, %3"
: "=c"(index), "=@ccc"(f)
: "m"(*s), "x"(search), "a"(4), "d"(16));
if (f)
break;
s += 16;
}
#else
s -= 16;
/* By doing the whole loop in inline assembly,
we can make proper use of the flags set. */
__asm ( ".balign 16\n"
"0: add $16, %1\n"
" %vpcmpestri\t$0, (%1), %2\n"
" jnc 0b"
: "=&c"(index), "+r"(s)
: "x"(search), "a"(4), "d"(16));
#endif
found:
return s + index;
}
#else
/* Work around outdated assemblers without SSE4 support. */
#define search_line_sse42 search_line_sse2
#endif
/* Check the CPU capabilities. */
#include "../gcc/config/i386/cpuid.h"
typedef const uchar * (*search_line_fast_type) (const uchar *, const uchar *);
static search_line_fast_type search_line_fast;
#define HAVE_init_vectorized_lexer 1
static inline void
init_vectorized_lexer (void)
{
unsigned dummy, ecx = 0, edx = 0;
search_line_fast_type impl = search_line_acc_char;
int minimum = 0;
#if defined(__SSE4_2__)
minimum = 3;
#elif defined(__SSE2__)
minimum = 2;
#elif defined(__SSE__)
minimum = 1;
#endif
if (minimum == 3)
impl = search_line_sse42;
else if (__get_cpuid (1, &dummy, &dummy, &ecx, &edx) || minimum == 2)
{
if (minimum == 3 || (ecx & bit_SSE4_2))
impl = search_line_sse42;
else if (minimum == 2 || (edx & bit_SSE2))
impl = search_line_sse2;
else if (minimum == 1 || (edx & bit_SSE))
impl = search_line_mmx;
}
else if (__get_cpuid (0x80000001, &dummy, &dummy, &dummy, &edx))
{
if (minimum == 1
|| (edx & (bit_MMXEXT | bit_CMOV)) == (bit_MMXEXT | bit_CMOV))
impl = search_line_mmx;
}
search_line_fast = impl;
}
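/* For example, when libcpp itself is built without any -msse* option
(MINIMUM == 0) and runs on a CPU whose CPUID level 1 reports SSE2 but
not SSE4.2, IMPL resolves to search_line_sse2; if CPUID reports none
of the SSE bits, the portable search_line_acc_char stays in place. */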
#elif defined(_ARCH_PWR8) && defined(__ALTIVEC__)
/* A version of the fast scanner using AltiVec vectorized byte compares
and VSX unaligned loads (when VSX is available). This is otherwise
the same as the pre-GCC 5 version. */
ATTRIBUTE_NO_SANITIZE_UNDEFINED
static const uchar *
search_line_fast (const uchar *s, const uchar *end ATTRIBUTE_UNUSED)
{
typedef __attribute__((altivec(vector))) unsigned char vc;
const vc repl_nl = {
'\n', '\n', '\n', '\n', '\n', '\n', '\n', '\n',
'\n', '\n', '\n', '\n', '\n', '\n', '\n', '\n'
};
const vc repl_cr = {
'\r', '\r', '\r', '\r', '\r', '\r', '\r', '\r',
'\r', '\r', '\r', '\r', '\r', '\r', '\r', '\r'
};
const vc repl_bs = {
'\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\',
'\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\'
};
const vc repl_qm = {
'?', '?', '?', '?', '?', '?', '?', '?',
'?', '?', '?', '?', '?', '?', '?', '?',
};
const vc zero = { 0 };
vc data, t;
/* Main loop processing 16 bytes at a time. */
do
{
vc m_nl, m_cr, m_bs, m_qm;
data = __builtin_vec_vsx_ld (0, s);
s += 16;
m_nl = (vc) __builtin_vec_cmpeq(data, repl_nl);
m_cr = (vc) __builtin_vec_cmpeq(data, repl_cr);
m_bs = (vc) __builtin_vec_cmpeq(data, repl_bs);
m_qm = (vc) __builtin_vec_cmpeq(data, repl_qm);
t = (m_nl | m_cr) | (m_bs | m_qm);
/* T now contains 0xff in bytes for which we matched one of the relevant
characters. We want to exit the loop if any byte in T is non-zero.
Below is the expansion of vec_any_ne(t, zero). */
}
while (!__builtin_vec_vcmpeq_p(/*__CR6_LT_REV*/3, t, zero));
/* Restore S to point to the 16 bytes we just processed. */
s -= 16;
{
#define N (sizeof(vc) / sizeof(long))
union {
vc v;
/* Statically assert that N is 2 or 4. */
unsigned long l[(N == 2 || N == 4) ? N : -1];
} u;
unsigned long l, i = 0;
u.v = t;
/* Find the first word of T that is non-zero. */
switch (N)
{
case 4:
l = u.l[i++];
if (l != 0)
break;
s += sizeof(unsigned long);
l = u.l[i++];
if (l != 0)
break;
s += sizeof(unsigned long);
/* FALLTHRU */
case 2:
l = u.l[i++];
if (l != 0)
break;
s += sizeof(unsigned long);
l = u.l[i];
}
/* L now contains 0xff in bytes for which we matched one of the
relevant characters. We can find the byte index by finding
its bit index and dividing by 8. */
#ifdef __BIG_ENDIAN__
l = __builtin_clzl(l) >> 3;
#else
l = __builtin_ctzl(l) >> 3;
#endif
return s + l;
#undef N
}
}
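/* For illustration, on a 64-bit little-endian host a match in byte 11
of the block lands in the second long: S is advanced by 8, L holds
0xff in its byte 3, __builtin_ctzl (l) is 24, and 24 >> 3 recovers
the in-word offset 3, i.e. byte 11 overall. */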
#elif (GCC_VERSION >= 4005) && defined(__ALTIVEC__) && defined (__BIG_ENDIAN__)
/* A version of the fast scanner using AltiVec vectorized byte compares.
This cannot be used for little endian because vec_lvsl/lvsr are
deprecated for little endian and the code won't work properly. */
/* ??? Unfortunately, attribute(target("altivec")) is not yet supported,
so we can't compile this function without -maltivec on the command line
(or implied by some other switch). */
static const uchar *
search_line_fast (const uchar *s, const uchar *end ATTRIBUTE_UNUSED)
{
typedef __attribute__((altivec(vector))) unsigned char vc;
const vc repl_nl = {
'\n', '\n', '\n', '\n', '\n', '\n', '\n', '\n',
'\n', '\n', '\n', '\n', '\n', '\n', '\n', '\n'
};
const vc repl_cr = {
'\r', '\r', '\r', '\r', '\r', '\r', '\r', '\r',
'\r', '\r', '\r', '\r', '\r', '\r', '\r', '\r'
};
const vc repl_bs = {
'\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\',
'\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\'
};
const vc repl_qm = {
'?', '?', '?', '?', '?', '?', '?', '?',
'?', '?', '?', '?', '?', '?', '?', '?',
};
const vc ones = {
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
};
const vc zero = { 0 };
vc data, mask, t;
/* Altivec loads automatically mask addresses with -16. This lets us
issue the first load as early as possible. */
data = __builtin_vec_ld(0, (const vc *)s);
/* Discard bytes before the beginning of the buffer. Do this by
beginning with all ones and shifting in zeros according to the
mis-alignment. The LVSR instruction pulls the exact shift we
want from the address. */
mask = __builtin_vec_lvsr(0, s);
mask = __builtin_vec_perm(zero, ones, mask);
data &= mask;
/* While altivec loads mask addresses, we still need to align S so
that the offset we compute at the end is correct. */
s = (const uchar *)((uintptr_t)s & -16);
/* Main loop processing 16 bytes at a time. */
goto start;
do
{
vc m_nl, m_cr, m_bs, m_qm;
s += 16;
data = __builtin_vec_ld(0, (const vc *)s);
start:
m_nl = (vc) __builtin_vec_cmpeq(data, repl_nl);
m_cr = (vc) __builtin_vec_cmpeq(data, repl_cr);
m_bs = (vc) __builtin_vec_cmpeq(data, repl_bs);
m_qm = (vc) __builtin_vec_cmpeq(data, repl_qm);
t = (m_nl | m_cr) | (m_bs | m_qm);
/* T now contains 0xff in bytes for which we matched one of the relevant
characters. We want to exit the loop if any byte in T is non-zero.
Below is the expansion of vec_any_ne(t, zero). */
}
while (!__builtin_vec_vcmpeq_p(/*__CR6_LT_REV*/3, t, zero));
{
#define N (sizeof(vc) / sizeof(long))
union {
vc v;
/* Statically assert that N is 2 or 4. */
unsigned long l[(N == 2 || N == 4) ? N : -1];
} u;
unsigned long l, i = 0;
u.v = t;
/* Find the first word of T that is non-zero. */
switch (N)
{
case 4:
l = u.l[i++];
if (l != 0)
break;
s += sizeof(unsigned long);
l = u.l[i++];
if (l != 0)
break;
s += sizeof(unsigned long);
/* FALLTHROUGH */
case 2:
l = u.l[i++];
if (l != 0)
break;
s += sizeof(unsigned long);
l = u.l[i];
}
/* L now contains 0xff in bytes for which we matched one of the
relevant characters. We can find the byte index by finding
its bit index and dividing by 8. */
l = __builtin_clzl(l) >> 3;
return s + l;
#undef N
}
}
#elif defined (__ARM_NEON) && defined (__ARM_64BIT_STATE)
#include "arm_neon.h"
/* This doesn't have to be the exact page size, but no system may use
a size smaller than this. ARMv8 requires a minimum page size of
4k. The impact of being conservative here is that a small number of
cases will take the slightly slower entry path into the main
loop. */
#define AARCH64_MIN_PAGE_SIZE 4096
static const uchar *
search_line_fast (const uchar *s, const uchar *end ATTRIBUTE_UNUSED)
{
const uint8x16_t repl_nl = vdupq_n_u8 ('\n');
const uint8x16_t repl_cr = vdupq_n_u8 ('\r');
const uint8x16_t repl_bs = vdupq_n_u8 ('\\');
const uint8x16_t repl_qm = vdupq_n_u8 ('?');
const uint8x16_t xmask = (uint8x16_t) vdupq_n_u64 (0x8040201008040201ULL);
#ifdef __ARM_BIG_ENDIAN
const int16x8_t shift = {8, 8, 8, 8, 0, 0, 0, 0};
#else
const int16x8_t shift = {0, 0, 0, 0, 8, 8, 8, 8};
#endif
unsigned int found;
const uint8_t *p;
uint8x16_t data;
uint8x16_t t;
uint16x8_t m;
uint8x16_t u, v, w;
/* Align the source pointer. */
p = (const uint8_t *)((uintptr_t)s & -16);
/* Assuming random string start positions, with a 4k page size we'll take
the slow path about 0.37% of the time. */
if (__builtin_expect ((AARCH64_MIN_PAGE_SIZE
- (((uintptr_t) s) & (AARCH64_MIN_PAGE_SIZE - 1)))
< 16, 0))
{
/* Slow path: the string starts near a possible page boundary. */
uint32_t misalign, mask;
misalign = (uintptr_t)s & 15;
mask = (-1u << misalign) & 0xffff;
data = vld1q_u8 (p);
t = vceqq_u8 (data, repl_nl);
u = vceqq_u8 (data, repl_cr);
v = vorrq_u8 (t, vceqq_u8 (data, repl_bs));
w = vorrq_u8 (u, vceqq_u8 (data, repl_qm));
t = vorrq_u8 (v, w);
t = vandq_u8 (t, xmask);
m = vpaddlq_u8 (t);
m = vshlq_u16 (m, shift);
found = vaddvq_u16 (m);
found &= mask;
if (found)
return (const uchar*)p + __builtin_ctz (found);
}
else
{
data = vld1q_u8 ((const uint8_t *) s);
t = vceqq_u8 (data, repl_nl);
u = vceqq_u8 (data, repl_cr);
v = vorrq_u8 (t, vceqq_u8 (data, repl_bs));
w = vorrq_u8 (u, vceqq_u8 (data, repl_qm));
t = vorrq_u8 (v, w);
if (__builtin_expect (vpaddd_u64 ((uint64x2_t)t) != 0, 0))
goto done;
}
do
{
p += 16;
data = vld1q_u8 (p);
t = vceqq_u8 (data, repl_nl);
u = vceqq_u8 (data, repl_cr);
v = vorrq_u8 (t, vceqq_u8 (data, repl_bs));
w = vorrq_u8 (u, vceqq_u8 (data, repl_qm));
t = vorrq_u8 (v, w);
} while (!vpaddd_u64 ((uint64x2_t)t));
done:
/* Now that we've found the terminating substring, work out precisely where
we need to stop. */
t = vandq_u8 (t, xmask);
m = vpaddlq_u8 (t);
m = vshlq_u16 (m, shift);
found = vaddvq_u16 (m);
return (((((uintptr_t) p) < (uintptr_t) s) ? s : (const uchar *)p)
+ __builtin_ctz (found));
}
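/* A note on the mask computation above: ANDing the 0xff/0x00 compare
result with XMASK leaves bit I set in byte I of each 8-byte half;
the pairwise add, the per-half shift, and the horizontal add then
collapse the vector into a 16-bit value whose bit K is set exactly
when byte K matched (a NEON substitute for x86's PMOVMSKB), so
__builtin_ctz recovers the byte index directly. */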
#elif defined (__ARM_NEON)
#include "arm_neon.h"
static const uchar *
search_line_fast (const uchar *s, const uchar *end ATTRIBUTE_UNUSED)
{
const uint8x16_t repl_nl = vdupq_n_u8 ('\n');
const uint8x16_t repl_cr = vdupq_n_u8 ('\r');
const uint8x16_t repl_bs = vdupq_n_u8 ('\\');
const uint8x16_t repl_qm = vdupq_n_u8 ('?');
const uint8x16_t xmask = (uint8x16_t) vdupq_n_u64 (0x8040201008040201ULL);
unsigned int misalign, found, mask;
const uint8_t *p;
uint8x16_t data;
/* Align the source pointer. */
misalign = (uintptr_t)s & 15;
p = (const uint8_t *)((uintptr_t)s & -16);
data = vld1q_u8 (p);
/* Create a mask for the bytes that are valid within the first
16-byte block. The idea here is that the AND with the mask
within the loop is "free", since we need some AND or TEST
insn in order to set the flags for the branch anyway. */
mask = (-1u << misalign) & 0xffff;
/* Main loop, processing 16 bytes at a time. */
goto start;
do
{
uint8x8_t l;
uint16x4_t m;
uint32x2_t n;
uint8x16_t t, u, v, w;
p += 16;
data = vld1q_u8 (p);
mask = 0xffff;
start:
t = vceqq_u8 (data, repl_nl);
u = vceqq_u8 (data, repl_cr);
v = vorrq_u8 (t, vceqq_u8 (data, repl_bs));
w = vorrq_u8 (u, vceqq_u8 (data, repl_qm));
t = vandq_u8 (vorrq_u8 (v, w), xmask);
l = vpadd_u8 (vget_low_u8 (t), vget_high_u8 (t));
m = vpaddl_u8 (l);
n = vpaddl_u16 (m);
found = vget_lane_u32 ((uint32x2_t) vorr_u64 ((uint64x1_t) n,
vshr_n_u64 ((uint64x1_t) n, 24)), 0);
found &= mask;
}
while (!found);
/* FOUND contains 1 in bits for which we matched a relevant
character. Conversion to the byte index is trivial. */
found = __builtin_ctz (found);
return (const uchar *)p + found;
}
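/* A note on the reduction above, assuming a little-endian host: after
the AND with XMASK, byte I of each 8-byte half carries bit I. The
pairwise adds collapse each half into an 8-bit mask held in one
32-bit lane of N, and ORing N with itself shifted right by 24 bits
lines the high half's mask up at bits 8-15, producing the same
16-bit PMOVMSKB-style result as the x86 variants. */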
#else
/* We only have one accelerated alternative. Use a direct call so that
we encourage inlining. */
#define search_line_fast search_line_acc_char
#endif
/* Initialize the lexer if needed. */
void
_cpp_init_lexer (void)
{
#ifdef HAVE_init_vectorized_lexer
init_vectorized_lexer ();
#endif
}
/* On return, the buffer holds a cleaned logical line that contains no
escaped newlines or trigraphs. This is a time-critical inner loop. */
void
_cpp_clean_line (cpp_reader *pfile)
{
cpp_buffer *buffer;
const uchar *s;
uchar c, *d, *p;
buffer = pfile->buffer;
buffer->cur_note = buffer->notes_used = 0;
buffer->cur = buffer->line_base = buffer->next_line;
buffer->need_line = false;
s = buffer->next_line;
if (!buffer->from_stage3)
{
const uchar *pbackslash = NULL;
/* Fast path. This is the common case of an unescaped line with
no trigraphs. The primary win here comes from not writing any
data back to memory until we have to. */
while (1)
{
/* Perform an optimized search for \n, \r, \\, ?. */
s = search_line_fast (s, buffer->rlimit);
c = *s;
if (c == '\\')
{
/* Record the location of the backslash and continue. */
pbackslash = s++;
}
else if (__builtin_expect (c == '?', 0))
{
if (__builtin_expect (s[1] == '?', false)
&& _cpp_trigraph_map[s[2]])
{
/* Have a trigraph. We may or may not have to convert
it. Add a line note regardless, for -Wtrigraphs. */
add_line_note (buffer, s, s[2]);
if (CPP_OPTION (pfile, trigraphs))
{
/* We do, and that means we have to switch to the
slow path. */
d = (uchar *) s;
*d = _cpp_trigraph_map[s[2]];
s += 2;
goto slow_path;
}
}
/* Not a trigraph. Continue on fast-path. */
s++;
}
else
break;
}
/* This must be \r or \n. We're either done, or we'll be forced
to write back to the buffer and continue on the slow path. */
d = (uchar *) s;
if (__builtin_expect (s == buffer->rlimit, false))
goto done;
/* DOS line ending? */
if (__builtin_expect (c == '\r', false) && s[1] == '\n')
{
s++;