forked from Velocidex/velociraptor
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathlogs.go
176 lines (148 loc) · 5.46 KB
/
logs.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
// NOTE: This file implements the old Client communication protocol. It is
// here to provide backwards communication with older clients and will
// eventually be removed.
package flows
import (
"context"
"regexp"
"strings"
"sync"
"time"
artifacts "www.velocidex.com/golang/velociraptor/artifacts"
config_proto "www.velocidex.com/golang/velociraptor/config/proto"
constants "www.velocidex.com/golang/velociraptor/constants"
crypto_proto "www.velocidex.com/golang/velociraptor/crypto/proto"
"www.velocidex.com/golang/velociraptor/file_store"
flows_proto "www.velocidex.com/golang/velociraptor/flows/proto"
"www.velocidex.com/golang/velociraptor/json"
"www.velocidex.com/golang/velociraptor/logging"
"www.velocidex.com/golang/velociraptor/paths"
"www.velocidex.com/golang/velociraptor/result_sets"
utils "www.velocidex.com/golang/velociraptor/utils"
)
var (
	// Default regex used to classify a collection log line as an error.
	defaultLogErrorRegex = regexp.MustCompile(constants.VQL_ERROR_REGEX)

	// If the config file specifies the regex we compile it once and
	// cache in memory.
	mu            sync.Mutex
	logErrorRegex *regexp.Regexp
)

// getLogErrorRegex returns the regex used to decide if a log line
// represents an error. If the config file overrides the default
// (Frontend.CollectionErrorRegex) the override is compiled once and
// cached; otherwise the compiled-in default is returned.
func getLogErrorRegex(config_obj *config_proto.Config) *regexp.Regexp {
	// Guard against a missing Frontend section to avoid a nil
	// pointer dereference.
	if config_obj.Frontend != nil &&
		config_obj.Frontend.CollectionErrorRegex != "" {
		mu.Lock()
		defer mu.Unlock()

		// Compile the user supplied regex only once and cache it.
		if logErrorRegex == nil {
			re, err := regexp.Compile(
				config_obj.Frontend.CollectionErrorRegex)
			if err != nil {
				// An invalid user supplied regex must not panic the
				// server (MustCompile would) - fall back to the
				// default regex instead.
				re = defaultLogErrorRegex
			}
			logErrorRegex = re
		}
		return logErrorRegex
	}

	return defaultLogErrorRegex
}
// An optimized method for writing multiple log messages from the
// collection into the result set. This method avoids the need to
// parse the messages and reduces the total number of messages sent to
// the server from the clients.
func writeLogMessages(
	config_obj *config_proto.Config,
	collection_context *CollectionContext,
	msg *crypto_proto.LogMessage) error {

	flow_path_manager := paths.NewFlowPathManager(
		collection_context.ClientId,
		collection_context.SessionId).Log()

	// Append logs to messages from previous packets.
	file_store_factory := file_store.GetFileStore(config_obj)
	rs_writer, err := result_sets.NewResultSetWriter(
		file_store_factory, flow_path_manager,
		json.DefaultEncOpts(), collection_context.completer.GetCompletionFunc(),
		false /* truncate */)
	if err != nil {
		return err
	}
	defer rs_writer.Close()

	// Seek to the row id the client assigned so retransmitted
	// packets land in the same place.
	rs_writer.SetStartRow(int64(msg.Id))

	// The JSON payload from the client.
	payload := artifacts.DeobfuscateString(config_obj, msg.Jsonl)

	// Append the current server time to all rows.
	payload = string(json.AppendJsonlItem([]byte(payload), "_ts",
		time.Now().UTC().Unix()))

	collection_context.TotalLogs += uint64(msg.NumberOfRows)
	rs_writer.WriteJSONL([]byte(payload), uint64(msg.NumberOfRows))

	if collection_context.State != flows_proto.
		ArtifactCollectorContext_ERROR {

		// Client will tag the errored message if the log message was
		// written with ERROR level.
		error_message := msg.ErrorMessage

		// One of the messages triggered an error - we need to figure
		// out which so we parse the JSONL payload to lock in on the
		// first errored message.
		//
		// Hoist the (mutex protected) regex lookup out of the
		// per-line loop.
		error_regex := getLogErrorRegex(config_obj)
		if error_message == "" &&
			error_regex.FindStringIndex(payload) != nil {
			for _, line := range strings.Split(payload, "\n") {
				if error_regex.FindStringIndex(line) != nil {
					// NOTE: named row to avoid shadowing the msg
					// parameter.
					row, err := utils.ParseJsonToObject([]byte(line))
					if err == nil {
						error_message, _ = row.GetString("message")
						// Lock in on the FIRST errored message -
						// without this break the last match would
						// win.
						if error_message != "" {
							break
						}
					}
				}
			}
		}

		// Does the payload contain errors? Mark the collection as failed.
		if error_message != "" {
			collection_context.State = flows_proto.ArtifactCollectorContext_ERROR
			collection_context.Status = error_message
			collection_context.Dirty = true
		}
	}

	return nil
}
// Flush the logs to disk. During execution the flow collects the logs
// in memory and then flushes it all when done.
func flushContextLogs(
	ctx context.Context, config_obj *config_proto.Config,
	collection_context *CollectionContext,
	completion *utils.Completer) error {

	// Handle monitoring flow specially.
	if collection_context.SessionId == constants.MONITORING_WELL_KNOWN_FLOW {
		return flushContextLogsMonitoring(ctx, config_obj, collection_context)
	}

	flow_path_manager := paths.NewFlowPathManager(
		collection_context.ClientId,
		collection_context.SessionId).Log()

	// Append logs to messages from previous packets.
	file_store_factory := file_store.GetFileStore(config_obj)
	rs_writer, err := result_sets.NewResultSetWriter(
		file_store_factory, flow_path_manager,
		json.DefaultEncOpts(), completion.GetCompletionFunc(),
		false /* truncate */)
	if err != nil {
		return err
	}
	defer rs_writer.Close()

	// Hoist the (mutex protected) regex lookup out of the per-row
	// loop - it is loop invariant.
	error_regex := getLogErrorRegex(config_obj)

	for _, row := range collection_context.Logs {
		// If the log message matches the error regex mark the
		// collection as errored out. Only record the first error -
		// once State is ERROR this branch is skipped for subsequent
		// rows.
		if collection_context.State != flows_proto.
			ArtifactCollectorContext_ERROR {

			// If any messages are of level ERROR or the message
			// matches the regex, then the collection is considered
			// errored.
			if row.Level == logging.ERROR ||
				error_regex.FindStringIndex(row.Message) != nil {
				collection_context.State = flows_proto.ArtifactCollectorContext_ERROR
				collection_context.Status = row.Message
				collection_context.Dirty = true
			}
		}

		collection_context.TotalLogs++
		// row.Timestamp appears to be in microseconds (divided by 1e6
		// to produce seconds for client_time) - TODO confirm.
		rs_writer.WriteJSONL([]byte(json.Format(
			"{\"_ts\":%d,\"client_time\":%d,\"level\":%q,\"message\":%q}\n",
			int(time.Now().Unix()),
			int64(row.Timestamp)/1000000,
			row.Level,
			row.Message)), 1)
	}

	// Clear the logs from the flow object.
	collection_context.Logs = nil
	return nil
}