/***************************************************************************
* scan_engine.cc -- Includes much of the "engine" functions for scanning, *
* such as pos_scan and ultra_scan. It also includes dependent functions *
* such as those for collecting SYN/connect scan responses. *
* *
***********************IMPORTANT NMAP LICENSE TERMS************************
* *
* The Nmap Security Scanner is (C) 1996-2006 Insecure.Com LLC. Nmap is *
* also a registered trademark of Insecure.Com LLC. This program is free *
* software; you may redistribute and/or modify it under the terms of the *
* GNU General Public License as published by the Free Software *
* Foundation; Version 2 with the clarifications and exceptions described *
* below. This guarantees your right to use, modify, and redistribute *
* this software under certain conditions. If you wish to embed Nmap *
* technology into proprietary software, we sell alternative licenses *
* (contact sales@insecure.com). Dozens of software vendors already *
* license Nmap technology such as host discovery, port scanning, OS *
* detection, and version detection. *
* *
* Note that the GPL places important restrictions on "derived works", yet *
* it does not provide a detailed definition of that term. To avoid *
* misunderstandings, we consider an application to constitute a *
* "derivative work" for the purpose of this license if it does any of the *
* following: *
* o Integrates source code from Nmap *
* o Reads or includes Nmap copyrighted data files, such as *
* nmap-os-fingerprints or nmap-service-probes. *
* o Executes Nmap and parses the results (as opposed to typical shell or *
* execution-menu apps, which simply display raw Nmap output and so are *
* not derivative works.) *
* o Integrates/includes/aggregates Nmap into a proprietary executable *
* installer, such as those produced by InstallShield. *
* o Links to a library or executes a program that does any of the above *
* *
* The term "Nmap" should be taken to also include any portions or derived *
* works of Nmap. This list is not exclusive, but is just meant to *
* clarify our interpretation of derived works with some common examples. *
* These restrictions only apply when you actually redistribute Nmap. For *
* example, nothing stops you from writing and selling a proprietary *
* front-end to Nmap. Just distribute it by itself, and point people to *
* http://insecure.org/nmap/ to download Nmap. *
* *
* We don't consider these to be added restrictions on top of the GPL, but *
* just a clarification of how we interpret "derived works" as it applies *
* to our GPL-licensed Nmap product. This is similar to the way Linus *
* Torvalds has announced his interpretation of how "derived works" *
* applies to Linux kernel modules. Our interpretation refers only to *
* Nmap - we don't speak for any other GPL products. *
* *
* If you have any questions about the GPL licensing restrictions on using *
* Nmap in non-GPL works, we would be happy to help. As mentioned above, *
* we also offer alternative license to integrate Nmap into proprietary *
* applications and appliances. These contracts have been sold to dozens *
* of software vendors, and generally include a perpetual license as well *
* as providing for priority support and updates as well as helping to *
* fund the continued development of Nmap technology. Please email *
* sales@insecure.com for further information. *
* *
* As a special exception to the GPL terms, Insecure.Com LLC grants *
* permission to link the code of this program with any version of the *
* OpenSSL library which is distributed under a license identical to that *
* listed in the included Copying.OpenSSL file, and distribute linked *
* combinations including the two. You must obey the GNU GPL in all *
* respects for all of the code used other than OpenSSL. If you modify *
* this file, you may extend this exception to your version of the file, *
* but you are not obligated to do so. *
* *
* If you received these files with a written license agreement or *
* contract stating terms other than the terms above, then that *
* alternative license agreement takes precedence over these comments. *
* *
* Source is provided to this software because we believe users have a *
* right to know exactly what a program is going to do before they run it. *
* This also allows you to audit the software for security holes (none *
* have been found so far). *
* *
* Source code also allows you to port Nmap to new platforms, fix bugs, *
* and add new features. You are highly encouraged to send your changes *
* to fyodor@insecure.org for possible incorporation into the main *
* distribution. By sending these changes to Fyodor or one of the *
* Insecure.Org development mailing lists, it is assumed that you are *
* offering Fyodor and Insecure.Com LLC the unlimited, non-exclusive right *
* to reuse, modify, and relicense the code. Nmap will always be *
* available Open Source, but this is important because the inability to *
* relicense code has caused devastating problems for other Free Software *
* projects (such as KDE and NASM). We also occasionally relicense the *
* code to third parties as discussed above. If you wish to specify *
* special license conditions of your contributions, just say so when you *
* send them. *
* *
* This program is distributed in the hope that it will be useful, but *
* WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
* General Public License for more details at *
* http://www.gnu.org/copyleft/gpl.html , or in the COPYING file included *
* with Nmap. *
* *
***************************************************************************/
/* $Id$ */
#ifdef WIN32
#include "nmap_winconfig.h"
#endif
#include <dnet.h>
#include "scan_engine.h"
#include "timing.h"
#include "NmapOps.h"
#include "nmap_tty.h"
#include <list>
using namespace std;
extern NmapOps o;
class UltraScanInfo;
struct ultra_scan_performance_vars {
int low_cwnd; /* The lowest cwnd (congestion window) allowed */
int host_initial_cwnd; /* Initial congestion window for ind. hosts */
int group_initial_cwnd; /* Initial congestion window for all hosts as a group */
int max_cwnd; /* I should never have more than this many probes
outstanding */
int quick_incr; /* How many probes are incremented for each response
in quick start mode */
int cc_incr; /* How many probes are incremented per (roughly) rtt in
congestion control mode */
int initial_ccthresh;
/* When a successful ping response comes back, it counts as this many
"normal" responses, because the fact that pings are neccessary means
we aren't getting much input. */
int ping_magnifier;
/* Try to send a scanping if no response has been received from a target host
in this many usecs */
int pingtime;
double group_drop_cwnd_divisor; /* all-host group cwnd divided by this
value if any packet drop occurs */
double group_drop_ccthresh_divisor; /* used to drop the group ccthresh when
any drop occurs */
double host_drop_ccthresh_divisor; /* used to drop the host ccthresh when
any drop occurs */
int tryno_cap; /* The maximum trynumber (starts at zero) allowed */
};
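/* Illustrative sketch (hedged -- this is not the literal update code, which
   appears later in this file): when a drop is detected, the group timing is
   cut back roughly along the lines of
     timing.cwnd = MAX(perf.low_cwnd,
                       timing.cwnd / perf.group_drop_cwnd_divisor);
   with ccthresh reduced by group_drop_ccthresh_divisor in the same spirit,
   mirroring TCP-style multiplicative decrease (RFC 2581). */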
/* Some of the algorithms used here are TCP congestion control
techniques from RFC2581. */
struct ultra_timing_vals {
double cwnd; /* Congestion window - in probes */
int ccthresh; /* The threshold after which mode is changed from QUICK_START
to CONGESTION_CONTROL */
int num_updates; /* Number of updates to this utv (generally packet receipts) */
/* Last time values were adjusted for a drop (you usually only want
to adjust again based on probes sent after that adjustment so a
sudden batch of drops doesn't destroy timing. Init to now */
struct timeval last_drop;
};
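/* Rough behavior sketch (hedged -- the real adjustment code appears later in
   this file): while cwnd < ccthresh the scan is in "quick start" and each
   response grows cwnd by roughly perf.quick_incr probes; once cwnd crosses
   ccthresh, growth slows to roughly perf.cc_incr per RTT, analogous to
   RFC 2581 slow start vs. congestion avoidance. */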
struct probespec_tcpdata {
u16 dport;
u8 flags;
};
struct probespec_udpdata {
u16 dport;
};
#define PS_NONE 0
#define PS_TCP 1
#define PS_UDP 2
#define PS_PROTO 3
#define PS_ICMP 4
#define PS_ARP 5
static const char *pspectype2ascii(int type) {
switch(type) {
case PS_NONE:
return "NONE";
case PS_TCP:
return "TCP";
case PS_UDP:
return "UDP";
case PS_PROTO:
return "IP Proto";
case PS_ICMP:
return "ICMP";
case PS_ARP:
return "ARP";
default:
fatal("%s: Unknown type: %d", __FUNCTION__, type);
}
return ""; // Unreached
}
/* The size of this structure is critical, since there can be tens of
thousands of them stored together ... */
typedef struct probespec {
/* To save space, I changed this from private enum (took 4 bytes) to
u8 that uses #defines above */
u8 type;
u8 proto; /* If not PS_ARP -- Protocol number ... eg IPPROTO_TCP, etc. */
union {
struct probespec_tcpdata tcp; /* if type is PS_TCP */
struct probespec_udpdata udp; /* PS_UDP */
/* Commented out for now, but will likely contain icmp type, maybe
code, used for PS_ICMP */
// struct probespec_icmpdata icmp;
/* Nothing needed for PS_ARP, since src mac and target IP are
avail from target structure anyway */
} pd;
} probespec;
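/* Illustrative example (hypothetical values) of filling a probespec for a
   TCP SYN probe to port 80:
     probespec ps;
     memset(&ps, 0, sizeof(ps));
     ps.type = PS_TCP;
     ps.proto = IPPROTO_TCP;
     ps.pd.tcp.dport = 80;
     ps.pd.tcp.flags = TH_SYN;
   Compare UltraProbe::setConnect() below, which fills the same fields. */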
class ConnectProbe {
public:
ConnectProbe();
~ConnectProbe();
int sd; /* Socket descriptor used for connection. -1 if not valid. */
};
struct IPExtraProbeData_tcp {
u16 sport;
u32 seq; /* host byte order (like the other fields) */
};
struct IPExtraProbeData_udp {
u16 sport;
};
struct IPExtraProbeData {
u16 ipid; /* host byte order */
union {
struct IPExtraProbeData_tcp tcp;
struct IPExtraProbeData_udp udp;
} pd;
};
/* At least for now, I'll just use this like a struct and access
all the data members directly */
class UltraProbe {
public:
UltraProbe();
~UltraProbe();
enum UPType { UP_UNSET, UP_IP, UP_CONNECT, UP_RPC, UP_ARP } type; /* The type of probe this is */
/* Sets this UltraProbe as type UP_IP and creates & initializes the
internal IPProbe. The relevant probespec is necessary for setIP
because pspec.type is ambiguous with just the ippacket (e.g. a
tcp packet could be PS_PROTO or PS_TCP). */
void setIP(u8 *ippacket, u32 iplen, const probespec *pspec);
/* Sets this UltraProbe as type UP_CONNECT, preparing to connect to given
port number*/
void setConnect(u16 portno);
/* Pass an arp packet, including ethernet header. Must be 42 bytes */
void setARP(u8 *arppkt, u32 arplen);
// The 4 accessors below all return in HOST BYTE ORDER
// source port used if TCP or UDP
u16 sport() {
return (mypspec.proto == IPPROTO_TCP)? probes.IP.pd.tcp.sport : probes.IP.pd.udp.sport; }
// destination port used if TCP or UDP
u16 dport() {
return (mypspec.proto == IPPROTO_TCP)? mypspec.pd.tcp.dport : mypspec.pd.udp.dport; }
u16 ipid() { return probes.IP.ipid; }
u32 tcpseq(); // TCP sequence number if protocol is TCP
/* Number, such as IPPROTO_TCP, IPPROTO_UDP, etc. */
u8 protocol() { return mypspec.proto; }
ConnectProbe *CP() { return probes.CP; } // if type == UP_CONNECT
// Arpprobe removed because not used.
// ArpProbe *AP() { return probes.AP; } // if UP_ARP
// Returns the protocol number, such as IPPROTO_TCP, or IPPROTO_UDP, by
// reading the appropriate fields of the probespec.
/* Get general details about the probe */
const probespec *pspec() { return &mypspec; }
u8 tryno; /* Try (retransmission) number of this probe */
u8 pingseq; /* 0 if this is not a scanping. Otherwise a positive ping seq#. */
/* If true, probe is considered no longer active due to timeout, but it
may be kept around a while, just in case a reply comes late */
bool timedout;
/* A packet may be timedout for a while before being retransmitted due to
packet sending rate limitations */
bool retransmitted;
struct timeval sent;
/* Time the previous probe was sent, if this is a retransmit (tryno > 0) */
struct timeval prevSent;
bool isPing() { return pingseq > 0; }
private:
probespec mypspec; /* Filled in by the appropriate set* function */
union {
IPExtraProbeData IP;
ConnectProbe *CP;
// ArpProbe *AP;
} probes;
void *internalProbe;
};
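/* Rough lifecycle sketch (hedged, with hypothetical local names; the actual
   send path lives in the probe-sending helpers later in this file): a probe
   for a connect scan is typically created, configured, and tracked like
     UltraProbe *probe = new UltraProbe();
     probe->tryno = tryno;
     probe->sent = USI->now;
     probe->setConnect(destport);
     hss->probes_outstanding.push_back(probe);
   and eventually torn down via HostScanStats::destroyOutstandingProbe(). */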
/* Global info for the connect scan */
class ConnectScanInfo {
public:
ConnectScanInfo();
~ConnectScanInfo();
/* Watch a socket descriptor (add to fd_sets and maxValidSD). Returns
true if the SD was absent from the list, false if you tried to
watch an SD that was already being watched. */
bool watchSD(int sd);
/* Clear SD from the fd_sets and maxValidSD. Returns true if the SD
was in the list, false if you tried to clear an sd that wasn't
there in the first place. */
bool clearSD(int sd);
int maxValidSD; /* The maximum socket descriptor in any of the fd_sets */
fd_set fds_read;
fd_set fds_write;
fd_set fds_except;
int numSDs; /* Number of socket descriptors being watched */
int maxSocketsAllowed; /* No more than this many sockets may be created at once */
};
/* These are ultra_scan() statistics for the whole group of Targets */
class GroupScanStats {
public:
struct timeval timeout; /* The time at which we abort the scan */
/* Most recent host tested for sendability */
struct sockaddr_storage latestip;
GroupScanStats(UltraScanInfo *UltraSI);
~GroupScanStats();
/* Returns true if the GLOBAL system says that sending is OK. */
bool sendOK();
/* Total # of probes outstanding (active) for all Hosts */
int num_probes_active;
UltraScanInfo *USI; /* The USI which contains this GSS. Use for at least
getting the current time w/o gettimeofday() */
struct ultra_timing_vals timing;
struct timeout_info to; /* Group-wide packet rtt/timeout info */
int numtargets; /* Total # of targets scanned -- includes finished and incomplete hosts */
int numprobes; /* Number of probes/ports scanned on each host */
/* The last time waitForResponses finished (initialized to GSS creation time) */
int probes_sent; /* Number of probes sent in total. This DOES include pings and retransmissions */
struct timeval last_wait;
int probes_sent_at_last_wait;
// number of hosts that timed out during scan, or were already timedout
int num_hosts_timedout;
ConnectScanInfo *CSI;
};
struct send_delay_nfo {
unsigned int delayms; /* Milliseconds to delay between probes */
/* The number of successful and dropped probes since the last time delayms
was changed */
unsigned int goodRespSinceDelayChanged;
unsigned int droppedRespSinceDelayChanged;
struct timeval last_boost; /* Most recent time of increase to delayms. Init to creation time. */
};
/* To test for rate limiting, there is a delay in sending the first packet
of a certain retransmission number. These values help track that. */
struct rate_limit_detection_nfo {
unsigned int max_tryno_sent; /* What is the max tryno we have sent so far (starts at 0) */
bool rld_waiting; /* Are we currently waiting due to RLD? */
struct timeval rld_waittime; /* if RLD waiting, when can we send? */
};
/* The ultra_scan() statistics that apply to individual target hosts in a
group */
class HostScanStats {
public:
Target *target; /* A copy of the Target that these stats refer to. */
HostScanStats(Target *t, UltraScanInfo *UltraSI);
~HostScanStats();
int freshPortsLeft(); /* Returns the number of ports remaining to probe */
int next_portidx; /* Index of the next port to probe in the relevant
ports array in USI.ports */
bool sent_arp; /* Has an ARP probe been sent for the target yet? */
/* How long I am currently willing to wait for a probe response
before considering it timed out. Uses the host values from
target if they are available, otherwise from gstats. Results
returned in MICROseconds. */
unsigned long probeTimeout();
/* How long I'll wait until completely giving up on a probe.
Timedout probes are often marked as such (and sometimes
considered a drop), but kept in the list just in case they come
really late. But after probeExpire(), I don't waste time keeping
them around. Give in MICROseconds */
unsigned long probeExpire();
/* Returns OK if sending a new probe to this host is OK (to avoid
flooding). If when is non-NULL, fills it with the time that sending
will be OK assuming no pending probes are resolved by responses
(call it again if they do). when will become now if it returns
true. */
bool sendOK(struct timeval *when);
/* If there are pending probe timeouts, fills in when with the time of
the earliest one and returns true. Otherwise returns false and
puts now in when. */
bool nextTimeout(struct timeval *when);
UltraScanInfo *USI; /* The USI which contains this HSS */
/* Removes a probe from probes_outstanding, adjusts HSS and USS
active probe stats accordingly, then deletes the probe. */
void destroyOutstandingProbe(list<UltraProbe *>::iterator probeI);
/* Mark an outstanding probe as timedout. Adjusts stats
accordingly. For connect scans, this closes the socket. */
void markProbeTimedout(list<UltraProbe *>::iterator probeI);
/* New (active) probes are appended to the end of this list. When a
host times out, it will be marked as such, but may hang around on
the list for a while just in case a response comes in. So use
num_probes_active to learn how many active (not timed out) probes
are outstanding. Probes on the bench (reached the current
maximum tryno and expired) are not counted in
probes_outstanding. */
list<UltraProbe *> probes_outstanding;
/* The number of probes in probes_outstanding, minus the inactive (timed out) ones */
unsigned int num_probes_active;
/* Probes timed out but not yet retransmitted because of congestion
control limits or because more retransmits may not be
neccessary. Note that probes on probe_bench are not included
in this value. */
unsigned int num_probes_waiting_retransmit;
unsigned int num_probes_outstanding() { return probes_outstanding.size(); }
/* The bench is a stock of probes (compacted into just the
probespec) that have met the current maximum tryno, and are on
ice until that tryno increases (so we can retransmit again), or
solidifies (so we can mark the port firewalled or whatever). The
tryno of bench members is bench_tryno. If the maximum tryno
increases, everyone on the bench is moved to the retry_stack.
*/
vector<probespec> probe_bench;
unsigned int bench_tryno; /* # tryno of probes on the bench */
/* The retry_stack are probespecs that were on the bench but are now
slated to be retried. It is kept sorted such that probes with highest
retry counts are on top, ready to be taken first. */
vector<probespec> retry_stack;
/* retry_stack_tries MUST BE KEPT IN SYNC WITH retry_stack.
retry_stack_tries[i] is the number of completed retries for the
probe in retry_stack[i] */
vector<u8> retry_stack_tries;
/* tryno of probes on the retry queue */
/* Moves the given probe from the probes_outstanding list, to
probe_bench, and decrements num_probes_waiting_retransmit accordingly */
void moveProbeToBench(list<UltraProbe *>::iterator probeI);
/* Dismiss all probe attempts on bench -- the ports are marked
'filtered' or whatever is appropriate for having no response */
void dismissBench();
/* Move all members of bench to retry_stack for probe retransmission */
void retransmitBench();
bool completed(); /* Whether or not the scan of this Target has completed */
/* This function provides the proper cwnd and ccthresh to use. It
may differ from versions in timing member var because when no
responses have been received for this host, may look at others in
the group. For CHANGING this host's timing, use the timing
memberval instead. */
void getTiming(struct ultra_timing_vals *tmng);
struct ultra_timing_vals timing;
/* The most recently received probe response time -- initialized to scan start time. */
struct timeval lastrcvd;
struct timeval lastping_sent; /* The time the most recent ping was sent (initialized to scan begin time) */
/* Value of numprobes_sent at lastping_sent time -- to ensure that we
don't send too many pings when probes are going slowly. */
int lastping_sent_numprobes;
struct timeval lastprobe_sent; /* Most recent probe send (including pings) by host. Init to scan begin time. */
/* A valid probe for sending scanpings. */
probespec pingprobe;
int pingprobestate; /* PORT_UNKNOWN if no pingprobe yet found */
/* gives the maximum try number (try numbers start at zero and
increment for each retransmission) that may be used, based on
the scan type, observed network reliability, timing mode, etc.
This may change during the scan based on network traffic. If
capped is not null, it will be filled with true if the tryno is
at its upper limit. That often calls for a warning to be issued,
and marking of remaining timedout ports firewalled or whatever is
appropriate. If mayincrease is non-NULL, it is set to whether
the allowedTryno may increase again. If it is false, any probes
which have reached the given limit may be dealt with. */
unsigned int allowedTryno(bool *capped, bool *mayincrease);
/* Provides the next ping sequence number. This starts at one, goes
up to 255, then wraps around back to 1. If inc is true, it is
incremented. Otherwise you just get a peek at what the next one
will be. */
u8 nextPingSeq(bool inc=true) {
u8 ret = nxtpseq;
if (inc) {
nxtpseq++;
if (nxtpseq == 0) nxtpseq++;
}
return ret;
}
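/* e.g. the sequence runs 1, 2, ..., 255, then back to 1; zero is skipped
   because a pingseq of 0 marks a probe as "not a scanping" (see
   UltraProbe::pingseq above). */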
/* This is the highest try number that has produced useful results
(such as port status change). */
unsigned int max_successful_tryno;
/* This starts as true because tryno may increase based on results, but
it becomes false if it becomes clear that tryno will not increase
further during the scan */
bool tryno_mayincrease;
int ports_finished; /* The number of ports of this host that have been determined */
int numprobes_sent; /* Number of port probes (not counting pings, but counting retransmits) sent to this host */
int numpings_sent;
/* Boost the scan delay for this host, usually because too many packet
drops were detected. */
void boostScanDelay();
struct send_delay_nfo sdn;
struct rate_limit_detection_nfo rld;
private:
u8 nxtpseq; /* the next scanping sequence number to use */
};
class UltraScanInfo {
public:
UltraScanInfo();
UltraScanInfo(vector<Target *> &Targets, struct scan_lists *pts, stype scantype) { Init(Targets, pts, scantype); }
~UltraScanInfo();
/* Must call Init if you create object with default constructor */
void Init(vector<Target *> &Targets, struct scan_lists *pts, stype scantp);
/* Consults with the group stats, and the hstats for every
incomplete hosts to determine whether any probes may be sent.
Returns true if they can be sent immediately. If when is non-NULL,
it is filled with the next possible time that probes can be sent
(which will be now, if the function returns true). */
bool sendOK(struct timeval *tv);
stype scantype;
bool tcp_scan; /* scantype is a type of TCP scan */
bool udp_scan;
bool icmp_scan;
bool prot_scan;
bool ping_scan; /* Includes trad. ping scan & arp scan */
bool ping_scan_arp; /* ONLY includes arp ping scan */
bool noresp_open_scan; /* Whether no response means a port is open */
struct timeval now; /* Updated after potentially meaningful delays. This can
be used to save a call to gettimeofday() */
GroupScanStats *gstats;
struct ultra_scan_performance_vars perf;
/* A circular buffer of the incompleteHosts. nextIncompleteHost() gives
the next one. The first time it is called, it will give the
first host in the list. If incompleteHosts is empty, returns
NULL. */
HostScanStats *nextIncompleteHost();
/* Removes any hosts that have completed their scans from the incompleteHosts
list. Returns the number of hosts removed. */
int removeCompletedHosts();
/* Find a HostScanStats by its IP address in the incomplete list.
Returns NULL if none are found. */
HostScanStats *findIncompleteHost(struct sockaddr_storage *ss);
unsigned int numIncompleteHosts() { return incompleteHosts.size(); }
unsigned int numInitialHosts() { return numInitialTargets; }
/* Any function which messes with (removes elements from)
incompleteHosts may have to manipulate nextI */
list<HostScanStats *> incompleteHosts;
ScanProgressMeter *SPM;
struct scan_lists *ports;
int rawsd; /* raw socket descriptor */
pcap_t *pd;
eth_t *ethsd;
u32 seqmask; /* This mask value is used to encode values in sequence
numbers. It is set randomly in UltraScanInfo::Init() */
private:
unsigned int numInitialTargets;
list<HostScanStats *>::iterator nextI;
};
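/* Rough usage sketch (hedged): ultra_scan(), defined later in this file,
   drives a UltraScanInfo roughly like
     UltraScanInfo USI(Targets, ports, scantype);
     while (USI.numIncompleteHosts() > 0) {
       // send probes to USI.nextIncompleteHost(), wait for and process
       // responses, then drop finished targets:
       USI.removeCompletedHosts();
     }
*/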
/* Whether this is storing timing stats for a whole group or an
individual host */
enum ultra_timing_type { TIMING_HOST, TIMING_GROUP };
/* Initialize the ultra_timing_vals structure timing. The utt must be
TIMING_HOST or TIMING_GROUP. If you happen to have the current
time handy, pass it as now, otherwise pass NULL */
static void init_ultra_timing_vals(ultra_timing_vals *timing,
enum ultra_timing_type utt,
int num_hosts_in_group,
struct ultra_scan_performance_vars *perf,
struct timeval *now);
/* Takes a buffer, buf, of size bufsz (32 bytes is sufficient) and
writes a short description of the probe (arg1) into buf. It also returns
buf. */
static char *probespec2ascii(probespec *pspec, char *buf, unsigned int bufsz) {
char flagbuf[32];
char *f;
switch(pspec->type) {
case PS_TCP:
if (!pspec->pd.tcp.flags) Strncpy(flagbuf, "(none)", sizeof(flagbuf));
else {
f = flagbuf;
if (pspec->pd.tcp.flags & TH_SYN) *f++ = 'S';
if (pspec->pd.tcp.flags & TH_FIN) *f++ = 'F';
if (pspec->pd.tcp.flags & TH_RST) *f++ = 'R';
if (pspec->pd.tcp.flags & TH_PUSH) *f++ = 'P';
if (pspec->pd.tcp.flags & TH_ACK) *f++ = 'A';
if (pspec->pd.tcp.flags & TH_URG) *f++ = 'U';
if (pspec->pd.tcp.flags & TH_ECE) *f++ = 'E'; /* rfc 2481/3168 */
if (pspec->pd.tcp.flags & TH_CWR) *f++ = 'C'; /* rfc 2481/3168 */
*f++ = '\0';
}
snprintf(buf, bufsz, "tcp to port %hu; flags: %s", pspec->pd.tcp.dport,
flagbuf);
break;
case PS_UDP:
snprintf(buf, bufsz, "udp to port %hu", pspec->pd.udp.dport);
break;
case PS_PROTO:
snprintf(buf, bufsz, "protocol %u", (unsigned int) pspec->proto);
break;
case PS_ARP:
snprintf(buf, bufsz, "ARP");
break;
default:
fatal("Unexpected probespec2ascii type encountered");
break;
}
return buf;
}
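/* For example, a PS_TCP probespec with dport 80 and flags TH_SYN comes out
   as something like "tcp to port 80; flags: S", and a PS_UDP probespec with
   dport 53 as "udp to port 53". */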
ConnectProbe::ConnectProbe() {
sd = -1;
}
ConnectProbe::~ConnectProbe() {
if (sd > 0) close(sd);
sd = -1;
}
UltraProbe::UltraProbe() {
type = UP_UNSET;
tryno = 0;
timedout = false;
retransmitted = false;
pingseq = 0;
mypspec.type = PS_NONE;
memset(&sent, 0, sizeof(sent));
memset(&prevSent, 0, sizeof(prevSent));
}
UltraProbe::~UltraProbe() {
if (type == UP_CONNECT)
delete probes.CP;
}
/* Pass an arp packet, including ethernet header. Must be 42 bytes */
void UltraProbe::setARP(u8 *arppkt, u32 arplen) {
type = UP_ARP;
mypspec.type = PS_ARP;
return;
}
/* Sets this UltraProbe as type UP_IP and creates & initializes the
internal IPProbe. The relevant probespec is necessary for setIP
because pspec.type is ambiguous with just the ippacket (e.g. a
tcp packet could be PS_PROTO or PS_TCP). */
void UltraProbe::setIP(u8 *ippacket, u32 iplen, const probespec *pspec) {
struct ip *ipv4 = (struct ip *) ippacket;
struct tcp_hdr *tcp = NULL;
struct udp_hdr *udp = NULL;
type = UP_IP;
if (ipv4->ip_v != 4)
fatal("Bogus packet passed to %s -- only IPv4 packets allowed",
__FUNCTION__);
assert(iplen >= 20);
assert(iplen == (u32) ntohs(ipv4->ip_len));
probes.IP.ipid = ntohs(ipv4->ip_id);
if (ipv4->ip_p == IPPROTO_TCP) {
assert (iplen >= (unsigned) ipv4->ip_hl * 4 + 20);
tcp = (struct tcp_hdr *) ((u8 *) ipv4 + ipv4->ip_hl * 4);
probes.IP.pd.tcp.sport = ntohs(tcp->th_sport);
probes.IP.pd.tcp.seq = ntohl(tcp->th_seq);
} else if (ipv4->ip_p == IPPROTO_UDP) {
assert(iplen >= (unsigned) ipv4->ip_hl * 4 + 8);
udp = (struct udp_hdr *) ((u8 *) ipv4 + ipv4->ip_hl * 4);
probes.IP.pd.udp.sport = ntohs(udp->uh_sport);
}
mypspec = *pspec;
return;
}
u32 UltraProbe::tcpseq() {
if (mypspec.proto == IPPROTO_TCP)
return probes.IP.pd.tcp.seq;
else
fatal("Bogus seq number request to %s -- type is %s", __FUNCTION__,
pspectype2ascii(mypspec.type));
return 0; // Unreached
}
/* Sets this UltraProbe as type UP_CONNECT, preparing to connect to given
port number*/
void UltraProbe::setConnect(u16 portno) {
type = UP_CONNECT;
probes.CP = new ConnectProbe();
mypspec.type = PS_TCP;
mypspec.proto = IPPROTO_TCP;
mypspec.pd.tcp.dport = portno;
mypspec.pd.tcp.flags = TH_SYN;
}
ConnectScanInfo::ConnectScanInfo() {
maxValidSD = -1;
numSDs = 0;
maxSocketsAllowed = (o.max_parallelism)? o.max_parallelism : MAX(5, max_sd() - 4);
FD_ZERO(&fds_read);
FD_ZERO(&fds_write);
FD_ZERO(&fds_except);
}
/* Nothing really to do here. */
ConnectScanInfo::~ConnectScanInfo() {}
/* Watch a socket descriptor (add to fd_sets and maxValidSD). Returns
true if the SD was absent from the list, false if you tried to
watch an SD that was already being watched. */
bool ConnectScanInfo::watchSD(int sd) {
assert(sd >= 0);
if (!FD_ISSET(sd, &fds_read)) {
FD_SET(sd, &fds_read);
FD_SET(sd, &fds_write);
FD_SET(sd, &fds_except);
numSDs++;
if (sd > maxValidSD)
maxValidSD = sd;
} else return false;
return true;
}
/* Clear SD from the fd_sets and maxValidSD. Returns true if the SD
was in the list, false if you tried to clear an sd that wasn't
there in the first place. */
bool ConnectScanInfo::clearSD(int sd) {
assert(sd >= 0);
if (FD_ISSET(sd, &fds_read)) {
FD_CLR(sd, &fds_read);
FD_CLR(sd, &fds_write);
FD_CLR(sd, &fds_except);
assert(numSDs > 0);
numSDs--;
if (sd == maxValidSD)
maxValidSD--;
} else return false;
return true;
}
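/* Note: when the cleared sd happened to be maxValidSD, the code above only
   decrements it by one rather than recomputing the true maximum, so
   maxValidSD may no longer name a watched descriptor. It remains an upper
   bound on every watched SD, which is presumably all the later select()
   call needs. */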
GroupScanStats::GroupScanStats(UltraScanInfo *UltraSI) {
memset(&latestip, 0, sizeof(latestip));
memset(&timeout, 0, sizeof(timeout));
USI = UltraSI;
init_ultra_timing_vals(&timing, TIMING_GROUP, USI->numIncompleteHosts(), &(USI->perf), &USI->now);
initialize_timeout_info(&to);
/* Default timeout should be much lower for arp */
if (USI->ping_scan_arp)
to.timeout = MIN(o.initialRttTimeout(), 100) * 1000;
num_probes_active = 0;
numtargets = USI->numIncompleteHosts(); // They are all incomplete at the beginning
if (USI->tcp_scan) {
numprobes = USI->ports->tcp_count;
} else if (USI->udp_scan) {
numprobes = USI->ports->udp_count;
} else if (USI->prot_scan) {
numprobes = USI->ports->prot_count;
} else if (USI->ping_scan_arp) {
numprobes = 1;
} else assert(0); /* TODO: RPC scan and maybe ping */
if (USI->scantype == CONNECT_SCAN)
CSI = new ConnectScanInfo;
else CSI = NULL;
probes_sent = probes_sent_at_last_wait = 0;
gettimeofday(&last_wait, NULL);
num_hosts_timedout = 0;
}
GroupScanStats::~GroupScanStats() {
delete CSI;
}
/* Returns true if the GLOBAL system says that sending is OK. */
bool GroupScanStats::sendOK() {
int recentsends;
if (USI->scantype == CONNECT_SCAN && CSI->numSDs >= CSI->maxSocketsAllowed)
return false;
/* We need to stop sending if it has been a long time since
the last listen call, at least for systems such as Windoze that
don't give us a proper pcap time. Also for connect scans, since
we don't get an exact response time with them either. */
recentsends = USI->gstats->probes_sent - USI->gstats->probes_sent_at_last_wait;
if (recentsends > 0 &&
(USI->scantype == CONNECT_SCAN || !pcap_recv_timeval_valid())) {
int to_ms = (int) MAX(to.srtt * .75 / 1000, 50);
if (TIMEVAL_MSEC_SUBTRACT(USI->now, last_wait) > to_ms)
return false;
}
/* There are good arguments for limiting the number of probes sent
between waits even when we do get appropriate receive times. For
example, overflowing the pcap receive buffer with responses is no
fun. On one of my Linux boxes, it seems to hold about 113
responses when I scan localhost. And half of those are the @#$#
sends being received. I think I'll put a limit of 50 sends per
wait */
if (recentsends >= 50)
return false;
/* When there is only one target left, let the host congestion
stuff deal with it. */
if (USI->numIncompleteHosts() == 1)
return true;
if (timing.cwnd >= num_probes_active + 0.5)
return true;
return false;
}
/* For the given scan type, this returns the port/host state demonstrated
by getting no response back */
static int scantype_no_response_means(stype scantype) {
switch(scantype) {
case SYN_SCAN:
case ACK_SCAN:
case WINDOW_SCAN:
case CONNECT_SCAN:
return PORT_FILTERED;
case UDP_SCAN:
case IPPROT_SCAN:
case NULL_SCAN:
case FIN_SCAN:
case MAIMON_SCAN:
case XMAS_SCAN:
return PORT_OPENFILTERED;
case PING_SCAN_ARP:
return HOST_DOWN;
default:
fatal("Unexpected scan type found in scantype_no_response_means()");
}
return 0; /* Unreached */
}
HostScanStats::HostScanStats(Target *t, UltraScanInfo *UltraSI) {
target = t;
USI=UltraSI;
next_portidx = 0;
sent_arp = false;
num_probes_active = 0;
num_probes_waiting_retransmit = 0;
lastping_sent = lastprobe_sent = lastrcvd = USI->now;
lastping_sent_numprobes = 0;
memset(&pingprobe, 0, sizeof(pingprobe));
pingprobestate = PORT_UNKNOWN;
nxtpseq = 1;
max_successful_tryno = 0;
tryno_mayincrease = true;
ports_finished = 0;
numprobes_sent = 0;
numpings_sent = 0;
init_ultra_timing_vals(&timing, TIMING_HOST, 1, &(USI->perf), &USI->now);
bench_tryno = 0;
memset(&sdn, 0, sizeof(sdn));
sdn.last_boost = USI->now;
sdn.delayms = o.scan_delay;
rld.max_tryno_sent = 0;
rld.rld_waiting = false;
rld.rld_waittime = USI->now;
}
HostScanStats::~HostScanStats() {
list<UltraProbe *>::iterator probeI, next;
/* Move any hosts from the bench to probes_outstanding for easier deletion */
for(probeI = probes_outstanding.begin(); probeI != probes_outstanding.end();
probeI = next) {
next = probeI;
next++;
destroyOutstandingProbe(probeI);
}
}
/* How long I am currently willing to wait for a probe response before
considering it timed out. Uses the host values from target if they
are available, otherwise from gstats. Results returned in
MICROseconds. */
unsigned long HostScanStats::probeTimeout() {
if (target->to.srtt > 0) {
/* We have at least one timing value to use. Good enough, I suppose */
return target->to.timeout;
} else if (USI->gstats->to.srtt > 0) {
/* OK, we'll use this one instead */
return USI->gstats->to.timeout;
} else {
return target->to.timeout; /* It comes with a default */
}
}
/* How long I'll wait until completely giving up on a probe.
Timedout probes are often marked as such (and sometimes
considered a drop), but kept in the list just in case they come
really late. But after probeExpire(), I don't waste time keeping
them around. Give in MICROseconds */
unsigned long HostScanStats::probeExpire() {
if (USI->scantype == CONNECT_SCAN)
return probeTimeout(); /* timedout probes close socket -- late resp. impossible */
return MIN(10000000, probeTimeout() * 10);
}
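/* For example, a 200 ms probeTimeout() yields a 2 s expiry; once
   probeTimeout() exceeds 1 s, the expiry is capped at the 10 s maximum. */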
/* Returns OK if sending a new probe to this host is OK (to avoid
flooding). If when is non-NULL, fills it with the time that sending
will be OK assuming no pending probes are resolved by responses
(call it again if they do). when will become now if it returns
true. */
bool HostScanStats::sendOK(struct timeval *when) {
struct ultra_timing_vals tmng;
int packTime;
list<UltraProbe *>::iterator probeI;
struct timeval probe_to, earliest_to, sendTime;
long tdiff;
if (target->timedOut(&USI->now) || completed()) {
if (when) *when = USI->now;
return false;
}
if (rld.rld_waiting) {
packTime = TIMEVAL_MSEC_SUBTRACT(rld.rld_waittime, USI->now);
if (packTime <= 0) {
if (when) *when = USI->now;
return true;
}
if (when) *when = rld.rld_waittime;
return false;
}
if (sdn.delayms) {
packTime = TIMEVAL_MSEC_SUBTRACT(USI->now, lastprobe_sent);
if (packTime < (int) sdn.delayms) {
if (when) { TIMEVAL_MSEC_ADD(*when, lastprobe_sent, sdn.delayms); }
return false;
}
}
getTiming(&tmng);
if (tmng.cwnd >= num_probes_active + .5 &&
(freshPortsLeft() || num_probes_waiting_retransmit || !retry_stack.empty())) {
if (when) *when = USI->now;
return true;
}
if (!when)
return false;
TIMEVAL_MSEC_ADD(earliest_to, USI->now, 10000);
// Any timeouts coming up?
for(probeI = probes_outstanding.begin(); probeI != probes_outstanding.end();
probeI++) {
if (!(*probeI)->timedout) {
TIMEVAL_MSEC_ADD(probe_to, (*probeI)->sent, probeTimeout() / 1000);
if (TIMEVAL_SUBTRACT(probe_to, earliest_to) < 0) {
earliest_to = probe_to;
}
}
}
// Will any scan delay affect this?
if (sdn.delayms) {
TIMEVAL_MSEC_ADD(sendTime, lastprobe_sent, sdn.delayms);
if (TIMEVAL_MSEC_SUBTRACT(sendTime, USI->now) < 0)
sendTime = USI->now;
tdiff = TIMEVAL_MSEC_SUBTRACT(earliest_to, sendTime);
/* Timeouts previous to the sendTime requirement are pointless,
and those later than sendTime are not needed if we can send a
new packet at sendTime */
if (tdiff < 0) {
earliest_to = sendTime;
} else {
getTiming(&tmng);
if (tdiff > 0 && tmng.cwnd > num_probes_active + .5) {
earliest_to = sendTime;
}
}