From c97e4aec6ed10d1de3b25e7b833594caac707cd1 Mon Sep 17 00:00:00 2001 From: Mike Cohen Date: Tue, 7 Apr 2020 18:37:34 +1000 Subject: [PATCH] Updated the fs command to be able to use the filestore. (#301) It is essential to be able to easily copy files into and out of the filestore. With the new filestore being inside mysql it is otherwise hard to administer it. This change adds the ability to see inside the filestore to the `fs ls`, `fs cp` and `fs cat` command. It is now possible to add items to the public directory like: ``` velociraptor -v fs cp '*.exe' fs:///public/ ``` --- actions/vql.go | 4 +- artifacts/assets/ab0x.go | 4 +- bin/artifacts_acquire.go | 4 +- bin/console.go | 4 +- bin/fs.go | 149 +++++++-- bin/query.go | 4 +- bin/unzip.go | 4 +- file_store/accessor.go | 147 --------- file_store/adapter.go | 75 +++++ file_store/compress.go | 12 +- file_store/directory.go | 236 +++++++++++++++ file_store/directory_accessor.go | 148 +++++++++ file_store/file_store.go | 282 ++---------------- file_store/memory.go | 13 +- file_store/mysql.go | 272 ++++++++++++----- file_store/mysql_test.go | 10 +- file_store/uploader.go | 85 ++++++ glob/glob.go | 10 +- glob/glob_test.go | 4 +- logging/logging.go | 9 +- reporting/container.go | 8 +- uploads/api.go | 29 ++ uploads/client_uploader.go | 90 ++++++ .../uploader.go => uploads/file_based.go | 99 +----- vql/acls.go | 5 + vql/filesystem/filesystem.go | 9 +- vql/filesystem/raw_registry.go | 9 +- vql/networking/upload.go | 13 +- vql/server/flows.go | 10 +- vql/server/monitoring.go | 26 +- vql/server/results.go | 10 +- vql/tools/gcs_upload.go | 8 +- vql/tools/s3_upload.go | 10 +- 33 files changed, 1145 insertions(+), 657 deletions(-) delete mode 100644 file_store/accessor.go create mode 100644 file_store/adapter.go create mode 100644 file_store/directory.go create mode 100644 file_store/directory_accessor.go create mode 100644 file_store/uploader.go create mode 100644 uploads/api.go create mode 100644 uploads/client_uploader.go 
rename vql/networking/uploader.go => uploads/file_based.go (57%) diff --git a/actions/vql.go b/actions/vql.go index 231c1e87a38..070167f15af 100644 --- a/actions/vql.go +++ b/actions/vql.go @@ -33,8 +33,8 @@ import ( crypto_proto "www.velocidex.com/golang/velociraptor/crypto/proto" "www.velocidex.com/golang/velociraptor/logging" "www.velocidex.com/golang/velociraptor/responder" + "www.velocidex.com/golang/velociraptor/uploads" vql_subsystem "www.velocidex.com/golang/velociraptor/vql" - vql_networking "www.velocidex.com/golang/velociraptor/vql/networking" "www.velocidex.com/golang/vfilter" ) @@ -96,7 +96,7 @@ func (self VQLClientAction) StartQuery( // Create a new query environment and store some useful // objects in there. VQL plugins may then use the environment // to communicate with the server. - uploader := &vql_networking.VelociraptorUploader{ + uploader := &uploads.VelociraptorUploader{ Responder: responder, } diff --git a/artifacts/assets/ab0x.go b/artifacts/assets/ab0x.go index 2fb9417d202..629e7f2fc59 100644 --- a/artifacts/assets/ab0x.go +++ b/artifacts/assets/ab0x.go @@ -322,7 +322,7 @@ var FileArtifactsDefinitionsWindowsForensicsBamYaml = []byte("\x1f\x8b\x08\x00\x var FileArtifactsDefinitionsWindowsForensicsFilenameSearchYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x93\x6f\x8b\x1b\x47\x0c\xc6\xdf\xef\xa7\x78\x30\x7d\x91\xd0\xcb\x26\x29\x7d\x65\x30\xe5\xe8\xc5\x34\xd0\xe3\xca\xf9\xa0\x14\x0c\x45\x9d\xd5\xda\x82\xdd\x99\xbd\x91\xd6\xbe\x85\x7c\xf8\xa2\x59\xdb\xb9\x3f\xf1\xab\xd9\x91\x46\xcf\xa3\x9f\xe4\x48\x3d\x2f\xf1\xb7\xc4\x26\x1d\xb5\x5e\xa7\xcc\x51\x25\x68\xbd\x96\x8e\x3d\xb6\x61\xca\x61\x5f\x35\xac\x21\xcb\x60\x92\xe2\x12\xdf\x2a\xe0\x46\x1a\x10\x74\xe0\x20\xad\x04\xb4\xd2\x31\xf8\x49\xd4\x90\x22\x6c\x2f\x8a\x9e\xc2\x5e\x22\x43\xfc\x9b\x31\x90\xc7\x32\x9a\xc4\x0a\xb1\x0a\x50\x93\xae\xfb\xf1\xa3\xdf\xaa\x0a\x78\xf0\x8b\x90\xfa\x3e\x45\x3c\x8e\xac\x2e\xee\xdf\xac\x18\x07\xb4\x99\x1f\x47\x8e\xd6\x4d\xae\x10\x48\x59\x91\x5a\x7c\xfd\xcb\xd5\x5a\xbb\xaa\x80\x46\x34\xa4\x03\xe7\x09\x14\x1b\x24\xdb\x73\x46\x4f\x66\x9c\xb5\xc6\x5d\x64\x1c\x69\x82\x25\x50\xd4\x23\xe7\xd9\xc0\x45\x48\x14\x96\xdc\x65\x01\x50\x5a\xf8\xe9\x76\xfd\x30\x77\xda\xa6\x0c\x8a\x13\x32\xb7\x9c\x39\x06\xf6\xe4\x92\xf3\x82\x88\x03\xac\xf1\xb5\xad\x50\x62\xe7\x2b\xaf\xdd\x92\xe4\x6e\xc2\x18\xe5\x71\x64\x8f\x46\x10\x86\xa4\x62\x72\x60\xec\xe5\x84\x84\x0c\xfe\xa2\x02\x76\x1c\x39\x53\xd7\x4d\xe8\x99\xa2\x5e\x0a\xe2\x48\x8a\x21\xb3\x72\xb4\xda\xb1\x6d\xa4\x1f\xba\x09\x0d\x1b\xe7\x5e\xa2\xc4\xdd\x5c\x87\xbe\xeb\x17\xe6\xdc\xb8\x04\x45\x70\x6c\x86\x24\xd1\x4e\x93\xaa\x30\xcf\x4a\x14\x2a\xbb\xe8\xcd\x50\xb4\xd2\xb2\x26\x37\x1f\x0f\x8e\x68\x47\x8e\x49\xeb\xcb\xa4\x28\x9b\xb4\x14\x0c\x34\x0c\x9d\xb0\x82\xf0\xcf\xf5\xfd\xf5\x19\x60\x41\x06\x65\xf3\x29\x9d\x9d\xf8\xc8\x2a\x40\xa2\x71\xe6\xf3\x1a\x3c\x03\x5d\x63\x7d\x22\xbd\x17\xbb\x2a\xb1\x8b\x4c\x61\x26\x0d\x47\x93\x56\xb8\x39\x31\xf6\x97\x1c\x2d\x4f\x38\xee\x39\x17\xb2\x85\xa6\x53\x6a\xd3\x18\x9b\xb2\x0b\xbe\x04\xfd\x60\x65\x6a\x99\x35\x75\x07\x2e\x90\xbc\x88\xef\x03\x28\xd8\x48\xdd\xf7\x21\x56\xd5\x40\x99\x7a\x67\xaa\xcb\x0a\x00\x3e\x60\xfe\xeb\x4c\x94\xe9\x7e\xec\xb8\x5c\x02\x0d\xb7\x34\x76\xb6\xc4\x51\x1a\x46\x4c\xbe\x98\xcb\x7e\x82\x72\xc8\x6c\x73\x53\xf6\x64\x2f\x4a
\xdc\xf0\x41\xc2\x9b\x02\x8b\xed\x76\xbb\xad\xb7\xdb\xb0\x5c\x54\x95\xa6\x31\x07\x2e\xd2\x1f\x7c\x47\xb3\xf0\xc9\x87\x5f\x7c\x3b\x9d\x80\xcd\x97\x3f\xbf\xfc\xfe\x80\x8d\x65\x89\xbb\xfa\xae\x6d\x1d\xf8\xf5\x06\xf3\xe9\xea\x92\x77\x4e\x9f\xf3\xfe\xe0\xa7\x1b\x32\xf2\xc4\xd3\xf1\x4d\xe6\x40\x59\xf9\xdf\x68\xad\xbe\x6b\x8a\xdd\xd5\xec\xfa\x4d\xe2\xb3\x5f\xdf\xda\xea\xa5\x93\x8f\xf8\xfc\xe9\x97\x5f\xdf\xbb\xd0\xed\xfa\xe1\xf2\x74\x7d\x7f\x77\x5b\x40\xbe\x7b\x59\x2d\x8f\x1d\xeb\xea\x4c\xf8\xaa\xd0\xd3\x93\x30\x7e\xc6\xe2\xa3\x2f\xca\xe2\x95\x05\x8e\xcd\xea\xf3\xa7\xcb\xef\x55\x34\x8e\xfd\x7f\x9c\x4b\xc2\xab\x08\x85\xc0\xaa\x29\xaf\x16\xde\xe5\xe2\x7d\xf5\x7f\x00\x00\x00\xff\xff\xc3\xd5\xf8\x54\x1b\x05\x00\x00") // FileArtifactsDefinitionsWindowsForensicsPrefetchYaml is "artifacts/definitions/Windows/Forensics/Prefetch.yaml" -var FileArtifactsDefinitionsWindowsForensicsPrefetchYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x57\x4d\x73\xdb\x36\x10\xbd\xf3\x57\xbc\xf1\x25\x52\xc6\xa1\xda\x5b\xaa\x09\x33\x75\x1c\x29\x39\xd8\x91\x2b\x2b\xc9\xb8\x93\x0b\x44\x2e\x45\x4c\x48\x80\x05\x40\xcb\x4a\x9b\xfe\xf6\x0e\xc0\x0f\xf3\x4b\xb1\xc7\xbd\xf4\x50\x1e\x32\x31\x81\x7d\xfb\x16\xfb\xde\x82\x12\x2c\xa3\x39\x3e\x73\x11\xc9\xbd\xf6\x97\x52\x91\xd0\x3c\xd4\xfe\x95\xa2\x98\x4c\x98\x78\x11\xe9\x50\xf1\xdc\x70\x29\xe6\xf8\xcb\x43\xbd\x17\x5f\x89\x72\x0d\x86\x90\x85\x09\x41\xc6\xc8\xab\x10\xc4\x3c\x25\xed\xe3\x73\x42\x02\x4c\x80\xee\x28\x2c\x0c\xdb\xa6\x04\xae\xa1\x0a\x71\xea\x01\x26\x21\xe8\x83\x36\x94\x41\x51\x28\x55\xa4\x91\x2b\x99\x93\x32\x9c\x34\xd8\x56\x16\xc6\xed\x69\x05\x1b\x89\x8c\x7d\x25\x70\x83\x98\x69\x43\xca\xc2\x48\x0b\x08\x41\x77\x06\x86\x67\xe4\xe3\xcd\x01\x39\x53\x9a\x8b\x1d\x4c\xc2\x35\xb8\x88\xa5\xca\x98\xe5\x8f\x3d\x81\x29\x42\x85\xe6\x01\x11\x19\x52\x19\x17\x84\xbd\x25\xbb\xe5\x82\x29\x97\x5f\x91\xc3\xe5\xc2\x91\xc8\x99\x36\x3e\x56\xa2\xae\xfd\xe7\x9f\x2c\x54\xc8\x04\x34\x51\x55\x4c\xca\xb4\xc1\xcb\x8a\xaf\x4d\x66\xe9\x68\x30\x11\x21\x54\xc4\x9a\x57\x98\xfc\x
82\x5c\x1a\x12\x86\xb3\xf4\x7e\xbb\x9e\xfa\x9e\x07\x6c\x12\x52\x25\x49\x4d\xb7\xa4\x58\x6a\x8b\x61\x99\xa5\xf9\x4c\x83\xdd\x32\x9e\x72\xcb\x3e\x96\xaa\x2c\x8f\x29\xc3\x63\x16\x1a\xdf\x03\x80\x17\x88\x98\xa1\xb3\xd8\x90\x02\x09\x5b\xa7\x86\x26\xa6\x6c\x53\xa4\xba\xef\x10\xdd\xf2\x88\x44\x48\x60\x6e\xab\x43\xb2\x91\x6d\x94\x37\x14\x4b\x45\x8f\x83\xd9\x96\x7b\x07\x38\xee\x40\x0f\x6b\xda\xd1\x5d\x03\x64\xa4\x15\x88\x4d\x2b\xab\x13\x3f\xc0\xaa\xf0\x14\xe4\xef\x2c\x64\xea\xd3\x5d\x83\x90\x30\x9d\xfc\x20\xbe\xa1\x62\xf7\xf9\x9e\x67\xff\x54\x96\xd2\xdc\x73\xd1\xc6\xe4\x7a\x3e\x9b\xed\xf7\x7b\x3f\xae\xb5\xbd\xe7\x5f\xb9\x2f\xd5\x6e\x66\xff\x33\x6b\x84\xee\xb1\xc2\x24\x52\xcd\x91\x31\x63\x12\xda\xfb\x3b\x45\x24\x7e\x0d\x0f\x5b\x52\xc4\xb4\x14\x7e\x28\x33\xcf\x6b\xfa\xa1\xe7\x15\xc5\xd2\x42\x35\x93\x77\xa9\xdc\x6a\xb7\x62\xe5\x15\xb3\x22\x35\x73\x9c\xcf\xbf\x54\xd2\xf9\x52\xe7\xfb\xf2\xdc\xcf\xe3\x0e\x42\xd3\xba\x26\xba\xe5\xbc\x93\xd6\xf9\xd3\x2d\x09\xa3\x07\xbd\xc3\xcd\xcd\xcd\xcd\x8b\xcb\xcb\x17\x6f\xdf\x6e\xb2\x6c\x9e\x24\x73\xad\x7f\x3f\xa9\xc0\xcc\x21\xa7\x79\xa9\x49\xc3\xb2\x7c\x90\xb8\xec\xf6\x23\x33\x0f\xda\xfd\xe4\xd4\x2d\x81\x8c\xe6\x2e\x5b\x2f\xe3\xf6\x1c\xb0\x91\xfe\x49\x07\xa6\x51\xc9\x8f\x41\xba\x6a\x39\xf1\xbc\xdc\xce\x1e\x11\xf1\x72\xe3\xf5\xe2\x62\x71\xbe\xc1\xea\x1a\x4b\x25\x33\x37\x39\x26\x53\x3b\x19\x14\xd9\x97\x01\x9e\xed\xcb\x26\x3e\xf3\x3c\x2d\x0b\x15\x92\x2e\x65\xf6\x47\x41\xea\x50\x4e\xc7\xf2\x99\xcd\x70\xc5\x94\xa6\xde\x4c\x74\xd3\x80\xe5\x79\x7a\x80\xa8\xc7\x41\xa9\x65\xdd\x84\x5e\x2c\x36\xc8\x63\xbc\x0a\x6a\x3a\xcf\xb1\x5c\xaf\x2e\xed\xf1\x13\x0b\x93\x49\xb3\xb1\x7c\x94\xdc\x07\x7f\xf6\xde\x01\xbd\xd8\x5d\x2a\xb7\x13\xfb\x8f\x0e\x3a\x32\x9d\xf6\x02\xbf\x9f\xf6\x5e\xb8\xc2\x46\xf0\x4b\xf8\x61\x5a\x00\x8b\xa6\x51\x7d\xb0\xf2\x59\xf2\x94\xae\xf9\xb7\x23\xab\xef\x99\x4e\xc6\x57\x3e\x91\xd2\x5c\x8a\xf1\xc5\x0b\xa6\xcd\xba\x10\x1b\xab\xb1\xf1\x1d\xeb\x42\x9c\xcb\x42\x98\xf1\xd5\xd9\xcc\xf1\xd2\x67\x61\x48\x5a\x53\x74\x84\x7a\x91\xa6\x57\xcc\x1c\x21\xf8\x
81\x65\x84\xb3\x6b\xd4\x06\xb7\x80\xf6\xdd\xf8\xee\xc6\x0e\x13\xca\x65\x98\x04\xe7\xee\xf6\xd2\x14\x4e\xc1\x34\xce\xab\x1b\xc3\x16\xf4\xb8\xf8\xcb\x4e\xfc\xa5\x8c\x78\xcc\xc3\x06\x63\x08\xe1\x74\x51\x8b\x61\x62\xd5\x69\x9d\x14\xd4\x25\xf6\x95\x01\xe0\xf3\xfb\xc5\x7a\x31\xca\x85\xc7\x93\xc6\x45\x41\xcb\xd1\xa7\xf6\x62\x14\x41\x4b\x12\x08\xfe\x6e\x5b\x7e\xbc\x34\x4a\x35\x05\x9b\xf5\xc7\xc5\x14\x67\x1f\xde\x3e\x9c\xb1\x31\x7f\x9d\xcf\x8a\xc8\x66\xba\x5f\x78\x20\x4f\xdf\x06\x53\xaf\xed\xe4\x65\xca\x8c\x21\x25\xec\xcd\x53\x5e\x42\x1d\xe7\xfa\x58\x53\x46\xd9\x96\x14\x3e\xfd\x76\x61\x3f\x71\x52\xf6\xed\xe0\x77\x0c\xdd\x5c\xf3\x4e\xa1\x18\x78\xdb\x65\x10\x7d\x6f\x57\xfe\x43\x80\x11\x87\xb7\x4c\x3e\x5e\x1e\x5a\x8a\xb5\x9a\x58\x3a\xba\x14\x1d\x57\x30\x7a\x4e\xb2\x51\x8b\x36\xf3\x71\x57\x38\x25\xc5\x83\xb5\xef\xdd\x63\x1d\x97\x4f\xa7\x91\xcd\x05\x58\x35\xb2\x93\x1b\xaf\x5b\x9a\xd7\x46\x71\xb1\xbb\x0f\x98\x3e\x49\x49\x83\xe4\xe5\x25\x38\x9a\xfd\xd5\x78\xf6\x32\xe2\xc1\xf4\x5e\x47\x0d\x61\xcb\xdd\xff\x69\x31\xb4\xc7\xd0\xbf\x13\x03\x6a\x05\x34\x83\x18\xaf\xf1\xf2\x7f\xcd\x8c\x3e\x47\x06\xd3\xbb\xf5\xea\xe3\x15\xde\xdc\xf4\xda\xd0\x19\x55\x52\x41\x9b\x68\x55\x98\xbc\x30\xd8\x73\x93\x38\x0a\xd5\xa4\xb2\x3f\x5a\x04\x51\x64\x07\xd9\x4e\xc9\x22\xc7\xf6\xd0\xa8\xa2\xa3\x50\x1b\x54\x4b\xe4\x5e\xa0\x6d\xd1\x74\x98\xb9\xae\x87\x09\xe3\x23\xaa\x65\x56\xb1\x3d\x89\xf7\xc6\xe1\xe0\xb3\x03\xd8\x8e\x44\x75\x6d\xd3\xd7\x4a\x73\x3a\x1d\x96\x47\xab\xba\x76\xa7\x34\x34\xdf\xf8\x57\xd6\xd1\xef\xac\xea\xe9\x82\xb4\xf3\x8c\x86\x8c\x14\x7c\xec\x4b\xab\x8d\xfe\x44\xc7\x35\xbe\x0f\x8e\x77\x10\xbd\x6b\x6f\x90\xd2\xa5\x69\x8b\x1f\x01\x26\xad\x9f\x89\xab\x35\x8e\x39\xcb\x39\xa3\xdf\xcc\x91\x5e\xf4\x4e\xc5\xb9\xa0\x1f\x96\xc7\x8e\xe7\x3f\x01\x00\x00\xff\xff\x28\xcb\xdf\x20\xb6\x10\x00\x00") +var FileArtifactsDefinitionsWindowsForensicsPrefetchYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x57\x4d\x73\xdb\x36\x10\xbd\xf3\x57\xbc\xf1\x25\x52\xc6\xa1\xda\x5b\xaa\x09\x33\x75\x1c\x29\x39\xd8\x91\x2b\x2b\xc9\xb8\x93\x0b\x44\x2e\x45\x4c\x48\x80\x05\x40\xcb\x4a\x9b\xfe\xf6\x0e\xc0\x0f\xf3\x4b\xb1\xc7\xbd\xf4\x50\x1e\x3c\x1e\x02\xfb\xf6\x2d\xf6\xbd\x05\x25\x58\x46\x73\x7c\xe6\x22\x92\x7b\xed\x2f\xa5\x22\xa1\x79\xa8\xfd\x2b\x45\x31\x99\x30\xf1\x22\xd2\xa1\xe2\xb9\xe1\x52\xcc\xf1\x97\x87\x7a\x2f\xbe\x12\xe5\x1a\x0c\x21\x0b\x13\x82\x8c\x91\x57\x21\x88\x79\x4a\xda\xc7\xe7\x84\x04\x98\x00\xdd\x51\x58\x18\xb6\x4d\x09\x5c\x43\x15\xe2\xd4\x03\x4c\x42\xd0\x07\x6d\x28\x83\xa2\x50\xaa\x48\x23\x57\x32\x27\x65\x38\x69\xb0\xad\x2c\x8c\xdb\xd3\x0a\x36\x12\x19\xfb\x4a\xe0\x06\x31\xd3\x86\x94\x85\x91\x16\x10\x82\xee\x0c\x0c\xcf\xc8\xc7\x9b\x03\x72\xa6\x34\x17\x3b\x98\x84\x6b\x70\x11\x4b\x95\x31\xcb\x1f\x7b\x02\x53\x84\x0a\xcd\x03\x22\x32\xa4\x32\x2e\x08\x7b\x4b\x76\xcb\x05\x53\x2e\xbf\x22\x87\xcb\x85\x23\x91\x33\x6d\x7c\xac\x44\x5d\xfb\xcf\x3f\x59\xa8\x90\x09\x68\xa2\xaa\x98\x94\x69\x83\x97\x15\x5f\x9b\xcc\xd2\xd1\x60\x22\x42\xa8\x88\x35\xaf\x30\xf9\x05\xb9\x34\x24\x0c\x67\xe9\xfd\x76\x3d\xf5\x3d\x0f\xd8\x24\xa4\x4a\x92\x9a\x6e\x49\xb1\xd4\x16\xc3\x32\x4b\xf3\x99\x06\xbb\x65\x3c\xe5\x96\x7d\x2c\x55\x59\x1e\x53\x86\xc7\x2c\x34\xbe\x07\x00\x2f\x10\x31\x43\x67\xb1\x21\x05\x12\xb6\x4e\x0d\x4d\x4c\xd9\xa6\x48\x75\xdf\x21\xba\xe5\x11\x89\x90\xc0\xdc\x56\x87\x64\x23\xdb\x28\x6f\x28\x96\x8a\x1e\x07\xb3\x2d\xf7\x0e\x70\xdc\x81\x1e\xd6\xb4\xa3\xbb\x06\xc8\x48\x2b\x10\x9b\x56\x56\x27\x7e\x80\x55\xe1\x29\xc8\xdf\x59\xc8\xd4\xa7\xbb\x06\x21\x61\x3a\xf9\x41\x7c\x43\xc5\xee\x2b\x63\x3c\xfb\x4a\x59\x5a\x73\xcf\x21\x18\x93\xeb\xf9\x6c\xb6\xdf\xef\xfd\xb8\xd6\xf7\x9e\x7f\xe5\xbe\x54\xbb\x99\xfd\x67\xd6\x88\xdd\x63\x85\x49\xa4\x9a\x23\x63\xc6\x24\xb4\xf7\x77\x8a\x48\xfc\x1a\x1e\xb6\xa4\x88\x69\x29\xfc\x50\x66\x9e\xd7\xf4\x44\xcf\x2b\x9a\xa5\x8d\x6a\x36\xef\x52\xb9\xd5\x6e\xc5\x4a\x2c\x66\x45\x6a\xe6\x38\x9f\x7f\xa9\xe4\xf3\xa5\xce\xf7\xe5\xb9
\x9f\xc7\x1d\x84\xa6\x7d\x4d\x74\xcb\x7d\x27\xad\x1e\xd0\x2d\x09\xa3\x07\xfd\xc3\xcd\xcd\xcd\xcd\x8b\xcb\xcb\x17\x6f\xdf\x6e\xb2\x6c\x9e\x24\x73\xad\x7f\x3f\xa9\xc0\xcc\x21\xa7\x79\xa9\x4b\xc3\xb2\x7c\x90\xb8\xec\xf8\x23\x33\x0f\x5a\xfe\xe4\xd4\x2d\x91\x8c\xe6\x2e\xdb\x2f\xe3\xf6\x2c\xb0\x91\xfe\x49\x07\xa6\x51\xca\x8f\x41\xba\x8a\x39\xf1\xbc\xdc\xce\x1f\x11\xf1\x72\xe3\xf5\xe2\x62\x71\xbe\xc1\xea\x1a\x4b\x25\x33\x37\x3d\x26\x53\x3b\x1d\x14\xd9\x97\x01\x9e\xed\xcb\x26\x3e\xf3\x3c\x2d\x0b\x15\x92\x2e\x65\xf6\x47\x41\xea\x50\x4e\xc8\xf2\x99\xcd\x70\xc5\x94\xa6\xde\x5c\x74\x13\x81\xe5\x79\x7a\x80\xa8\x47\x42\xa9\x67\xdd\x84\x5e\x2c\x36\xc8\x63\xbc\x0a\x6a\x3a\xcf\xb1\x5c\xaf\x2e\xed\xf1\x13\x0b\x93\x49\xb3\xb1\x7c\x94\xdc\x07\x7f\xf6\xde\x01\xbd\xd8\x5d\x2a\xb7\x13\xfb\x47\x07\x1d\x99\x4e\x7b\x81\xdf\x4f\x7b\x2f\x5c\x61\x23\xf8\x25\xfc\x30\x2d\x80\x45\xd3\xa8\x3e\x58\xf9\x2c\x79\x4a\xd7\xfc\xdb\x91\xd5\xf7\x4c\x27\xe3\x2b\x9f\x48\x69\x2e\xc5\xf8\xe2\x05\xd3\x66\x5d\x88\x8d\xd5\xd8\xf8\x8e\x75\x21\xce\x65\x21\xcc\xf8\xea\x6c\xe6\x78\xe9\xb3\x30\x24\xad\x29\x3a\x42\xbd\x48\xd3\x2b\x66\x8e\x10\xfc\xc0\x32\xc2\xd9\x35\x6a\x83\x5b\x40\xfb\x6e\x7c\x77\x63\x87\x09\xe5\x32\x4c\x82\x73\x77\x83\x69\x0a\xa7\x60\x1a\xe7\xd5\xad\x61\x0b\x7a\x5c\xfc\x65\x27\xfe\x52\x46\x3c\xe6\x61\x83\x31\x84\x70\xba\xa8\xc5\x30\xb1\xea\xb4\x4e\x0a\xea\x12\xfb\xca\x00\xf0\xf9\xfd\x62\xbd\x18\xe5\xc2\xe3\x49\xe3\xa2\xa0\xe5\xe8\x53\x7b\x39\x8a\xa0\x25\x09\x04\x7f\xb7\x2d\x3f\x5e\x1a\xa5\x9a\x82\xcd\xfa\xe3\x62\x8a\xb3\x0f\x6f\x1f\xce\xd8\x98\xbf\xce\x67\x45\x64\x33\xdd\x2f\x3c\x90\xa7\x6f\x83\xa9\xd7\x76\xf2\x32\x65\xc6\x90\x12\xf6\xf6\x29\x2f\xa2\x8e\x73\x7d\xac\x29\xa3\x6c\x4b\x0a\x9f\x7e\xbb\xb0\x9f\x39\x29\xfb\x76\xf0\x3b\x86\x6e\xae\x7a\xa7\x50\x0c\xbc\xed\x32\x88\xbe\xb7\x2b\xff\x21\xc0\x88\xc3\x5b\x26\x1f\x2f\x0f\x2d\xc5\x5a\x4d\x2c\x1d\x5d\x8a\x8e\x2b\x18\x3d\x27\xd9\xa8\x45\x9b\xf9\xb8\x2b\x9c\x92\xe2\xc1\xda\xf7\xee\xb1\x8e\xcb\xa7\xd3\xc8\xe6\x02\xac\x1a\xd9\xc9\x8d\xd7\x2d\xcd\x6b\xa3\xb8
\xd8\xdd\x07\x4c\x9f\xa4\xa4\x41\xf2\xf2\x12\x1c\xcd\xfe\x6a\x3c\x7b\x19\xf1\x60\xfa\x8e\x18\xc2\x96\xb9\xff\xd3\x5a\x68\x4f\xa1\x7f\xa7\x05\xd4\x02\x68\xe6\x30\x5e\xe3\xe5\xff\x92\x19\x7d\x8e\xcc\xa5\x77\xeb\xd5\xc7\x2b\xbc\xb9\xe9\xb5\xa1\x33\xa9\xa4\x82\x36\xd1\xaa\x30\x79\x61\xb0\xe7\x26\x71\x14\xaa\x41\x65\x7f\xb7\x08\xa2\xc8\xce\xb1\x9d\x92\x45\x8e\xed\xa1\x51\x45\x47\xa1\x36\xa8\x96\xc8\xbd\x40\xdb\xa2\xe9\x30\x73\x5d\x0f\x13\xc6\x47\x54\xcb\xac\x62\x7b\x12\xef\x4d\xc3\xc1\x57\x07\xb0\x1d\x89\xea\xda\xa6\xaf\x95\xe6\x74\x3a\x2c\x8f\x56\x75\xed\x4e\x69\x68\xbe\xf1\x8f\xac\xa3\x9f\x59\xd5\xd3\x05\x69\xe7\x19\x0d\x19\x29\xf8\xd8\x87\x56\x1b\xfd\x89\x8e\x6b\x7c\x1f\x1c\xef\x20\x7a\xb7\xde\x20\xa5\x4b\xd3\x16\x3f\x02\x4c\x5a\xbf\x14\x57\x6b\x1c\x73\x96\x73\x46\xbf\x99\x23\xbd\xe8\x9d\x8a\x73\x41\x3f\x2c\x8f\x1d\xcf\x7f\x02\x00\x00\xff\xff\x53\xd6\x76\x58\xb9\x10\x00\x00") // FileArtifactsDefinitionsWindowsForensicsRecentAppsYaml is "artifacts/definitions/Windows/Forensics/RecentApps.yaml" var FileArtifactsDefinitionsWindowsForensicsRecentAppsYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x54\x4d\x6f\xe3\x36\x10\xbd\xf3\x57\x0c\x7c\x59\x3b\xb5\xa5\xe6\x50\x14\x30\xaa\x2d\x84\xac\xdd\x0d\x36\xe9\x1a\xb1\xd3\x3d\x54\x6d\xc0\xa5\x46\x16\x11\x89\x54\x67\xa8\x2a\x42\x3f\x7e\x7b\x41\x4b\x8e\xad\xd8\xe9\x07\xaa\x83\x24\x7e\xbd\x79\x33\xef\x71\x8c\x2c\x71\x0e\x9f\xb4\x49\x6d\xc3\xc1\xd2\x12\x1a\xd6\x8a\x83\x3b\x54\x68\x5c\x5c\x55\x2c\x52\x64\x45\xba\x72\xda\x9a\x39\xfc\x2e\x00\xbe\xbb\xbf\x86\x15\xd9\x2d\xc9\x12\xf0\x09\x55\xed\x97\xa0\x90\xb5\x51\x39\xa6\x60\x0d\xb8\x1c\x3d\xe6\xe5\x97\xc0\x2d\x3b\x2c\x41\x33\x38\x92\xea\x11\x53\xd0\xbb\x65\x01\x70\x08\x01\x8f\xd8\x0a\x41\x98\x21\xa1\x51\x38\x17\x00\x33\xc8\x9d\xab\x78\x1e\x86\x4d\xd3\x04\x2c\x0d\x07\x96\xb6\x21\xa3\xaa\x49\xbb\x76\x46\xc8\xb6\x26\x85\x1c\x56\x96\x1d\x12\x87\x4d\x97\xc3\x2c\xdb\xe7\x30\xc3\x5f\x75\xea\xf1\x66\x36\x0b\xbf\xfe\x2a\x4c\x6d\x63\x0a\x2b\x53\x21\x2a\x42\x65\x4d\xaa\xbb\x94\xd6\x8b\x9b\xc5\xd5\x06\x3e\xae\x61\x49\xb6\x04\x6d\x32\x3b\x9e\x40\x93\x23\xa1\x9f\x8c\xe0\x4d\x0f\xfd\x46\x88\x4a\x92\x2c\xd1\xc7\xeb\x48\x76\xe5\xbb\x67\xa4\xa5\x2e\x1c\x92\x00\x00\x48\x31\x93\x75\xe1\xe6\x30\x1a\xf5\xe3\xa3\x02\x5e\x67\xc0\x15\x2a\x9d\x69\x4c\xa1\x41\xc8\x76\xe7\xe0\x73\x0b\x2e\xd7\x0c\x35\x23\xc1\xf5\xbb\x40\x1c\xc1\x2f\xf6\x35\xde\xe8\x12\xe3\xec\xb5\x30\xae\xad\x70\x0e\x4e\x97\xc8\x4e\x96\xd5\x3f\x84\xb6\xa6\x68\x81\x73\xdb\x1c\x24\x64\x90\x1e\xbc\x23\xe2\x71\x06\x2c\x0e\x72\x7d\xc0\x76\x48\x60\x6d\x33\xd7\x48\xc2\xe4\x56\x2b\xb2\x6c\x33\x97\xf4\x8e\x4a\xae\x6a\x22\x34\xee\x07\x24\xd6\xd6\x24\x6b\x94\xa4\xf2\xe4\x80\x95\x5c\x88\x17\x85\x7c\x6f\x4b\xe4\x21\xfe\xd5\x3c\xf1\x2b\x9c\x5c\x24\xdf\x6f\xee\xd7\x8b\xbb\xe0\x5d\xbc\x11\xa2\x77\x40\xa7\xc4\x2f\x35\x92\xee\x06\xfe\x99\xc1\xcd\x62\x03\x9b\xdb\x15\x44\x7b\x81\x2f\x60\x79\xf7\xf1\x16\xbc\x3f\xa4\xca\xc7\xfd\x46\x00\x20\xdb\x44\xbf\x1d\x86\x00\xfb\x13\xcb\xba\x28\x56\xd2\xe5\xdd\xc1\x6d\x61\x3f\x8f\xfd\x8b\xa3\x67\x9e\x93\xc3\xb1\x3f\xa6\x87\x7f\x4f\xa6\x3d\x8f\x19\x57\xd5\x75\x3a\xf5
\x1f\x8f\x3c\x85\x9b\xdd\xb5\xb9\xb2\xb5\x71\xd3\xc1\xfe\xfe\x79\xd6\x73\xdc\x68\x93\xe9\x02\xfd\x44\x74\x23\xd9\xc5\x4a\x21\x33\xa6\xde\x15\x13\x88\xd7\xe0\x27\x9f\xbd\xf2\x7f\xc0\x82\x7b\xa3\x9f\x4e\x10\x37\xeb\xb3\x98\x95\x24\xc6\x07\x76\xa4\xcd\xf6\xa1\xd1\x2e\x7f\x20\xdc\xe2\xd3\xf8\xdc\x5e\x00\xe8\x36\x46\x1f\xb0\x0d\xf6\xe5\x3d\x0b\xeb\x75\xf1\x38\xd1\x28\xdc\x69\x1f\x8e\xbf\x5d\x7d\xe3\xff\xde\xfe\xf8\x73\xf8\xd3\x17\x93\xd0\x38\x7f\x57\x82\x54\xba\xd1\x24\xf0\x0b\x9e\xb0\xff\x0e\xd0\x76\xca\x11\xca\xd4\xb3\x7a\x78\xc4\xf6\x84\x57\xa7\x68\x4d\xc5\x98\x55\x8e\x25\x46\x23\xe3\x32\x1e\x9d\x23\x55\x49\x97\x47\x7f\xc7\x3a\x23\xb9\x2d\xd1\xb8\x68\x70\x59\x26\xc1\x7a\x97\xf4\xc9\x09\xb9\x2b\xba\xa5\x68\x44\xb2\xf1\x04\x47\xc7\x7e\x9a\x88\x81\x97\xe3\xcb\x13\x2b\xeb\xec\x38\x9b\xe7\x96\x16\x1d\xfa\xd1\x71\x48\x97\xa3\x39\xef\xc9\x1e\xce\xdf\x96\x4f\xef\x17\x77\x8b\x5d\x19\x21\xfa\xf3\x65\x63\xeb\x89\x4d\x01\x0b\xc6\x68\x73\xbb\x3a\xa2\xf8\xef\x98\x9d\xb6\xb2\xff\xc4\x30\xbe\xec\x09\xbe\x30\x26\xbc\x7d\xad\x49\x0e\x29\xc7\x97\x13\xf1\x57\x00\x00\x00\xff\xff\x74\x63\x08\x6a\xf2\x06\x00\x00") @@ -493,7 +493,7 @@ var FileArtifactsDefinitionsWindowsSystemUntrustedBinariesYaml = []byte("\x1f\x8 var FileArtifactsDefinitionsWindowsSystemVADYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x50\xcb\x8e\x9b\x40\x10\xbc\xcf\x57\xd4\x65\xb5\x90\x60\x3e\x00\x89\x03\x4a\x88\x72\xd8\x4d\x2c\x58\x65\x0f\x51\x0e\x23\xa6\x61\x47\xf2\x3c\xd2\x33\x64\x71\x5e\xdf\x1e\x61\x30\xb2\xe5\xf4\x05\xa6\xab\xeb\xa1\xb2\xd2\x50\x81\x67\x6d\x95\x7b\x0d\x79\x7b\x0c\x91\x4c\xfe\xa5\x7a\x2f\x14\x85\x8e\xb5\x8f\xda\xd9\x02\xbf\x05\x50\xdb\xd1\x10\xcb\x48\x88\x2f\x04\x43\xc6\xf1\x11\x4c\x83\x76\x36\xc0\xf5\x20\xd9\xbd\x80\x47\x6b\xb5\x1d\xe0\xd9\x75\x14\x42\x2e\x84\x97\x2c\x0d\x45\xe2\x50\x08\x60\x87\xc5\x70\xc5\x1b\x1a\x68\x12\x00\x70\x65\x57\xcd\xba\x34\x41\x7a\x7f\xd0\xa4\x10\xdd\x99\x70\xa2\x87\x7c\xa5\xf4\x72\x3c\xc4\x02\xb9\x10\xc1\x8d\xdc\xd1\x6a\xf1\x7d\x24\xd6\xcb\x63\x9e\x1d\x1e\xea\xa7\xb3\x00\x05\x94\x68\xeb\x87\xfa\xdd\x13\xf6\x5a\x65\xf8\x24\x0d\xad\x87\xc0\x87\xe6\xf3\x23\x7c\x38\xe8\x10\x93\x74\xdb\x3e\x7f\xac\x9b\xfa\x74\x88\xf2\xef\x6d\xf4\xd9\x61\x55\x7c\xb3\x28\xf4\x8e\xe7\x36\x92\x4d\x01\x60\xf7\x5a\x6e\x11\xb2\x0b\x60\x0e\x7b\x2c\x7f\x5d\x6c\x70\x93\x2f\xbb\x42\xe7\xe9\x1d\x1b\x19\x93\xe5\x53\xde\xdf\x4d\xbb\xbb\xe9\x3e\x83\xe4\x21\x94\x5f\x2b\xa5\x98\x42\xc8\xb0\xfe\xbc\x6d\xf5\x4f\xfa\x96\xa2\x6a\xd1\x48\x3b\xfc\x47\x6e\xcf\x2e\x52\x37\x97\x9f\xe1\x51\x7a\xaf\xed\x70\xd5\xcb\xd6\xcd\x0f\xa9\x12\xaf\x55\xb9\xd7\x2a\xbd\x40\xff\xa4\xe2\x5f\x00\x00\x00\xff\xff\xf2\x61\x55\x2d\x49\x02\x00\x00") // FileArtifactsDefinitionsWindowsTimelinePrefetchYaml is "artifacts/definitions/Windows/Timeline/Prefetch.yaml" -var FileArtifactsDefinitionsWindowsTimelinePrefetchYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x57\x5f\x6f\xdb\x36\x10\x7f\xd7\xa7\x38\x18\x18\xea\xb4\x89\xdc\xbd\x75\x46\x5d\x2c\x4d\x9d\xf6\xa1\x69\x82\xc4\x5d\x91\xad\x43\x41\x4b\x27\x8b\xa8\x44\xaa\x3c\x2a\x8e\xd7\x76\x9f\x7d\x38\x52\x92\x25\x59\x4e\x82\xee\x65\xc0\xf4\x90\x18\xe4\xdd\xef\x8e\xbc\xdf\xfd\xa1\x12\x39\x4e\xe1\x83\x54\xb1\x5e\x53\xb8\x90\x39\x66\x52\x61\x78\x61\x30\x41\x1b\xa5\x41\x8c\x14\x19\x59\x58\xa9\xd5\x14\xbe\x05\x50\x8b\xc2\x67\xc4\x82\x40\x40\x24\xa2\x14\x41\x27\x50\x54\x2a\x90\xc8\x0c\x29\x84\x0f\x29\x2a\x10\x0a\xf0\x16\xa3\xd2\x8a\x65\x86\x20\x09\x4c\xa9\x0e\x03\x00\x9b\x22\xd0\x86\x2c\xe6\x60\x30\xd2\x26\x26\x28\x8c\x2e\xd0\x58\x89\x04\x62\xa9\x4b\xeb\x64\x5a\xca\x56\x43\x2e\x3e\x23\x48\x0b\x89\x20\x8b\x86\x61\x34\x03\x82\xc2\x5b\x0b\x56\xe6\x18\xc2\xcb\x0d\x14\xc2\x90\x54\x2b\xb0\xa9\x24\x90\x2a\xd1\x26\x17\xec\x3f\xac\x11\x84\x41\xa8\xd0\x02\x80\x18\x2d\x9a\x5c\x2a\x84\x35\x3b\xbb\x94\x4a\x18\x67\xdf\xa0\xc3\x95\xca\x39\x51\x08\xb2\x21\x9c\xab\xfa\xec\x3f\x3f\x65\xa8\x48\x28\x20\xc4\xea\x30\x99\x20\x0b\xcf\x2a\x7f\xd9\x18\xbb\x43\x20\x54\x0c\x91\x41\xd1\x2c\xc1\xf8\x17\x28\xb4\x45\x65\xa5\xc8\xb6\xe2\x74\x10\x02\x04\x00\xee\xcf\x82\x1d\x17\xc6\xca\x44\x44\x96\x2f\x4d\x38\x55\x0e\x4c\x0c\xba\xb4\x45\x69\xe1\x06\x0d\x31\xa6\x4e\xfc\x5d\x5a\xa1\x62\x61\x62\xa8\x03\xc7\x38\x35\x44\x08\x8b\x14\x8d\x3f\x3c\xe1\x0d\x1a\x91\xf1\x25\x89\x9c\x8f\xff\x88\x40\xdc\x08\x99\xc9\x65\x86\xb5\x0f\x47\x10\x0b\x8b\xc7\x89\x45\x03\xa8\xf8\xbe\x08\x08\x85\xe1\xe0\x6a\xb3\x8d\x34\xde\xc8\x18\x55\x84\x20\x9c\xa8\xbb\x70\xd6\xec\xe2\xbc\xc4\x44\x1b\x7c\x18\xd0\xd2\xcb\xb6\x91\x6a\x28\x17\x9d\xcd\x25\xae\xf0\xb6\xc1\xb2\x9a\xd9\xc6\xb6\x75\x15\xbe\x0d\x30\xa3\x0f\x01\xc3\x15\xa3\x66\x21\xde\xb6\xdc\x49\x05\xa5\x77\x20\x34\xfe\xb0\x1c\x9b\x06\x08\x02\x5e\x33\xec\xdc\x34\x70\x10\xd6\x16\x34\x9d\x4c\xd6\xeb\x75\xc8\xbe\x2a\x92\x11\xad\xe5\x67\x19\x6a\xb3\x9a\xf0\x8f\x49\x93\x3c\x81\x28\x6d\xaa\xcd\x14\x72\x61\x6d\x8a\xeb\x70\x65\x10\xd5\xaf\xd1\x66\x89\x06
\x05\x69\x15\x46\x3a\x0f\x82\x26\x16\x34\xad\xfc\xf4\x59\x59\xbb\xf3\x3a\xd3\x4b\x72\x3b\x4c\xd9\x44\x94\x99\x9d\xc2\xc9\xf4\x63\x45\xc7\x8f\xb5\xbd\x8f\x8f\xc3\x22\xe9\x20\x34\x61\x6c\xb4\x5b\xd9\x3c\x6a\x45\x02\x6f\x50\x59\xda\x8d\xe3\xf5\xf5\xf5\xf5\xd1\xd9\xd9\xd1\xab\x57\x8b\x3c\x9f\xa6\xe9\x94\xe8\xf7\x51\x05\x66\x37\x05\x4e\x3d\xcf\xad\xc8\x8b\x1d\xc3\x3e\xee\x0f\xb4\xbc\x1b\xf8\x1f\x35\xdd\xe2\xc9\xa0\x6d\x1f\x7f\x9d\xb4\x6b\x0b\x6b\x86\xa3\x0e\x4c\x43\x95\xbb\x41\xba\x94\xa9\xfd\x0b\x0a\xae\x6a\x2a\x96\x5e\xfc\x6a\xfe\x76\x7e\xb2\x80\xf3\x2b\x38\x35\x3a\x77\x35\x69\x7c\xc0\x35\xc7\x20\x2f\xce\xe0\xd1\xda\x87\xf2\x51\x10\x90\x2e\x4d\x84\xe4\xc9\xf6\xa5\x44\x2e\x48\xd3\x0a\xf6\x08\xde\xce\x17\x90\x6a\xb2\xec\x23\x3c\x9f\xd5\xc8\xa7\x5f\x62\x05\xa7\x97\xe7\x67\x15\x76\x23\xff\xad\xfa\x05\x30\x99\xc0\x85\x30\x84\xbd\x3a\xed\x2a\x94\x28\x8a\x6c\x03\xaa\x2e\x51\x3e\x21\xa8\x51\x65\xa3\x45\xd2\x32\xf7\xd8\xdb\xe2\x88\x89\x28\x1d\x37\x82\xfe\x33\x7a\x3d\xfb\xda\x5b\x03\xe8\xe9\xae\x32\xbd\x1c\xf3\x1f\x9a\x75\x68\x7e\xd0\x53\xfc\x7e\xd8\x5b\xe0\x2b\xd9\x0c\xe0\x7b\xf8\x5d\xb3\x00\x30\x6f\x02\xdd\x07\xf3\xdf\xa9\xcc\xf0\x4a\xfe\xb5\x67\xf7\x8d\xa0\x74\x78\xe7\x37\x5f\x83\x87\x37\xdf\x0a\xb2\x97\xa5\xe2\x8e\x4a\xc3\x12\x97\xa5\x3a\xd1\xa5\xb2\xc3\xbb\x93\x89\xf3\x8b\x8e\xa3\x08\x89\x30\x3e\x84\x61\xdf\xcb\x2c\xbb\x10\x36\xdd\xb3\xfd\x8e\x79\x72\x7c\xd5\xb4\x05\x86\xe4\xb5\x61\x9b\x4d\x42\x8d\xb1\xd0\x51\x3a\x3b\x71\x3d\x95\x30\x3a\x00\x41\x70\x52\xf5\x31\x3e\xd2\xc3\xf4\xcf\x3a\xfa\x67\x3a\x96\x89\x8c\x1a\x8c\x5d\x08\xc7\x8c\x9a\x0e\x63\xe6\x27\xf3\x7c\x56\x9f\xb1\xcf\x0d\x00\xf8\xf0\x66\x7e\x39\x87\xe1\xb3\xcb\x64\xdc\xe4\xe0\xac\x55\x15\x0e\xb9\x63\xaa\x59\x8b\x16\x30\xfb\xbb\x5d\x36\x86\x0f\x87\x19\xe1\x6c\x71\xf9\x7e\x7e\x00\xc7\xef\x5e\xdd\x6f\xb1\x29\x20\xb5\x3d\x26\x12\x5b\xda\x6e\xdc\x63\xa7\x9f\x0a\x7b\xd2\xfa\x34\x13\xd6\xa2\x51\x2e\x95\xb7\xcd\xcc\xcf\x43\x9d\x24\x6e\xc6\x0d\xc7\x4a\xd8\xc9\x67\x07\xa4\xfa\xf9\x5c\xe5\x1c\xcc\xe0\xeb\xf0\x45\xd7\x28\xc3\xe7
\x81\x16\x4b\x99\x06\xa7\xce\x43\x8c\x1d\x6b\xf7\x6a\xb4\xd3\x87\xb5\xe6\x6d\xd7\x87\x33\xc1\x91\x27\xd9\x75\xf1\x7b\xf7\x22\x3d\x65\x76\xa4\x3a\xb1\x6b\xfa\x66\x15\xbb\x8e\x75\x78\xd1\x22\x3a\x59\x23\xd5\x6a\xab\x70\xf0\x43\xe4\xd9\x31\xee\x7b\xe7\xa0\xf5\xe7\xc3\xd6\xbd\xc6\xbd\xe6\x07\x28\xc4\xcc\x88\x5a\xb9\xfd\xdf\x26\x46\xbb\x0a\xfd\x4b\x62\x40\xc3\x86\xba\x14\xc3\x0b\x78\xf6\x20\xfe\xfc\xbf\xe8\x03\xfb\xcb\xd2\xeb\xcb\xf3\xf7\x17\xf0\xf2\xfa\x01\x71\x00\x68\x0a\x75\xaf\x82\x9d\xfb\x37\x8d\x41\x2a\x33\x4b\x60\x50\xc4\x1b\x37\x1c\xd6\xcf\x9e\x0e\x57\x99\x8d\x95\x46\x43\xd4\x3b\xfa\x7e\x8b\x2b\x6e\xd6\xfc\x64\xf7\xf6\xaf\x7a\xb2\x0a\x79\x9e\xfa\xe3\xe9\x9f\xac\x53\xaf\x0d\x6b\x8c\xea\xc6\x3a\x62\x59\x7e\x79\xa2\xd9\x23\x39\xaf\xdf\x38\x3a\xd9\x7a\x36\x85\x11\x3c\x69\x77\xa2\x27\xe0\xdf\xab\x63\xff\x6f\x36\xda\xbe\xe8\xf8\x45\x1a\x39\x96\xfe\x74\x33\x3a\x04\x61\x56\x34\xab\x89\xeb\x1a\x6c\x8e\x44\x62\xb5\x7f\xc8\x69\xb2\x8b\x85\xfd\x9c\x39\x2c\xdb\xf2\x47\x90\x9b\x13\x3f\xed\xbf\x82\x7e\x42\xd6\xfd\xfb\x53\xb4\xff\xa2\xfb\xa3\x40\x47\x2f\xdf\xaf\x57\x4f\x6a\x1d\x79\xba\x73\x74\xeb\x88\xa6\xf7\xcd\x72\x1d\xe9\xea\x8d\xbd\x67\xb4\xea\x8f\x54\x1d\x55\xbe\xb3\xbb\xa7\xbe\xee\x55\xb9\x95\x8e\xbc\xab\x5a\x51\x2a\xe4\x40\xd9\xe5\x4f\xb8\xb2\xdb\xad\xd3\xbd\xfe\xbe\x33\x3b\xfb\x6f\x39\xa0\xd9\xad\xff\xed\x51\x63\xb7\x13\xf8\xdc\xeb\x41\x07\xff\x04\x00\x00\xff\xff\x53\x80\xe0\x26\x49\x12\x00\x00") +var FileArtifactsDefinitionsWindowsTimelinePrefetchYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x57\x5f\x6f\xdb\x36\x10\x7f\xd7\xa7\x38\x18\x18\xea\xb4\x89\xdc\xbd\x75\x46\x5d\x2c\x4d\x9d\xf6\xa1\x69\x82\xc4\x5d\x91\xad\x43\x41\x4b\x27\x8b\xa8\x44\xaa\x3c\x2a\x8e\xd7\x76\x9f\x7d\x38\x52\x92\x25\x59\x4e\x82\xee\x65\xc0\xf4\x90\x18\xe4\xdd\xef\x8e\xbc\xdf\xfd\xa1\x12\x39\x4e\xe1\x83\x54\xb1\x5e\x53\xb8\x90\x39\x66\x52\x61\x78\x61\x30\x41\x1b\xa5\x41\x8c\x14\x19\x59\x58\xa9\xd5\x14\xbe\x05\x50\x8b\xc2\x67\xc4\x82\x40\x40\x24\xa2\x14\x41\x27\x50\x54\x2a\x90\xc8\x0c\x29\x84\x0f\x29\x2a\x10\x0a\xf0\x16\xa3\xd2\x8a\x65\x86\x20\x09\x4c\xa9\x0e\x03\x00\x9b\x22\xd0\x86\x2c\xe6\x60\x30\xd2\x26\x26\x28\x8c\x2e\xd0\x58\x89\x04\x62\xa9\x4b\xeb\x64\x5a\xca\x56\x43\x2e\x3e\x23\x48\x0b\x89\x20\x8b\x86\x61\x34\x03\x82\xc2\x5b\x0b\x56\xe6\x18\xc2\xcb\x0d\x14\xc2\x90\x54\x2b\xb0\xa9\x24\x90\x2a\xd1\x26\x17\xec\x3f\xac\x11\x84\x41\xa8\xd0\x02\x80\x18\x2d\x9a\x5c\x2a\x84\x35\x3b\xbb\x94\x4a\x18\x67\xdf\xa0\xc3\x95\xca\x39\x51\x08\xb2\x21\x9c\xab\xfa\xec\x3f\x3f\x65\xa8\x48\x28\x20\xc4\xea\x30\x99\x20\x0b\xcf\x2a\x7f\xd9\x18\xbb\x43\x20\x54\x0c\x91\x41\xd1\x2c\xc1\xf8\x17\x28\xb4\x45\x65\xa5\xc8\xb6\xe2\x74\x10\x02\x04\x00\xee\xcf\x82\x1d\x17\xc6\xca\x44\x44\x96\x2f\x4d\x38\x55\x0e\x4c\x0c\xba\xb4\x45\x69\xe1\x06\x0d\x31\xa6\x4e\xfc\x5d\x5a\xa1\x62\x61\x62\xa8\x03\xc7\x38\x35\x44\x08\x8b\x14\x8d\x3f\x3c\xe1\x0d\x1a\x91\xf1\x25\x89\x9c\x8f\xff\x88\x40\xdc\x08\x99\xc9\x65\x86\xb5\x0f\x47\x10\x0b\x8b\xc7\x89\x45\x03\xa8\xf8\xbe\x08\x08\x85\xe1\xe0\x6a\xb3\x8d\x34\xde\xc8\x18\x55\x84\x20\x9c\xa8\xbb\x70\xd6\xec\xe2\xbc\xc4\x44\x1b\x7c\x18\xd0\xd2\xcb\xb6\x91\x6a\x28\x17\x9d\xcd\x25\xae\xf0\xb6\xc1\xb2\x9a\xd9\xc6\xb6\x75\x15\xbe\x0d\x30\xa3\x0f\x01\xc3\x15\xa3\x66\x21\xde\xb6\xdc\x49\x05\xa5\x77\x20\x34\xfe\xb0\x1c\x9b\x06\x08\x02\x5e\x33\xec\xdc\x34\x70\x10\xd6\x16\x34\x9d\x4c\xd6\xeb\x75\xc8\xbe\x2a\x92\x11\xad\xe5\x67\x19\x6a\xb3\x9a\xf0\x8f\x49\x93\x3c\x81\x28\x6d\xaa\xcd\x14\x72\x61\x6d\x8a\xeb\x70\x65\x10\xd5\xaf\xd1\x66\x89\x06
\x05\x69\x15\x46\x3a\x0f\x82\x26\x16\x34\xad\xfc\xf4\x59\x59\xbb\xf3\x3a\xd3\x4b\x72\x3b\x4c\xd9\x44\x94\x99\x9d\xc2\xc9\xf4\x63\x45\xc7\x8f\xb5\xbd\x8f\x8f\xc3\x22\xe9\x20\x34\x61\x6c\xb4\x5b\xd9\x3c\x6a\x45\x02\x6f\x50\x59\xda\x8d\xe3\xf5\xf5\xf5\xf5\xd1\xd9\xd9\xd1\xab\x57\x8b\x3c\x9f\xa6\xe9\x94\xe8\xf7\x51\x05\x66\x37\x05\x4e\x3d\xcf\xad\xc8\x8b\x1d\xc3\x3e\xee\x0f\xb4\xbc\x1b\xf8\x1f\x35\xdd\xe2\xc9\xa0\x6d\x1f\x7f\x9d\xb4\x6b\x0b\x6b\x86\xa3\x0e\x4c\x43\x95\xbb\x41\xba\x94\xa9\xfd\x0b\x0a\xae\x6a\x2a\x96\x5e\xfc\x6a\xfe\x76\x7e\xb2\x80\xf3\x2b\x38\x35\x3a\x77\x35\x69\x7c\xc0\x35\xc7\x20\x2f\xce\xe0\xd1\xda\x87\xf2\x51\x10\x90\x2e\x4d\x84\xe4\xc9\xf6\xa5\x44\x2e\x48\xd3\x0a\xf6\x08\xde\xce\x17\x90\x6a\xb2\xec\x23\x3c\x9f\xd5\xc8\xa7\x5f\x62\x05\xa7\x97\xe7\x67\x15\x76\x23\xff\xad\xfa\x05\x30\x99\xc0\x85\x30\x84\xbd\x3a\xed\x2a\x94\x28\x8a\x6c\x03\xaa\x2e\x51\x3e\x21\xa8\x51\x65\xa3\x45\xd2\x32\xf7\xd8\xdb\xe2\x88\x89\x28\x1d\x37\x82\xfe\x33\x7a\x3d\xfb\xda\x5b\x03\xe8\xe9\xae\x32\xbd\x1c\xf3\x1f\x9a\x75\x68\x7e\xd0\x53\xfc\x7e\xd8\x5b\xe0\x2b\xd9\x0c\xe0\x7b\xf8\x5d\xb3\x00\x30\x6f\x02\xdd\x07\xf3\xdf\xa9\xcc\xf0\x4a\xfe\xb5\x67\xf7\x8d\xa0\x74\x78\xe7\x37\x5f\x83\x87\x37\xdf\x0a\xb2\x97\xa5\xe2\x8e\x4a\xc3\x12\x97\xa5\x3a\xd1\xa5\xb2\xc3\xbb\x93\x89\xf3\x8b\x8e\xa3\x08\x89\x30\x3e\x84\x61\xdf\xcb\x2c\xbb\x10\x36\xdd\xb3\xfd\x8e\x79\x72\x7c\xd5\xb4\x05\x86\xe4\xb5\x61\x9b\x4d\x42\x8d\xb1\xd0\x51\x3a\x3b\x71\x3d\x95\x30\x3a\x00\x41\x70\x52\xf5\x31\x3e\xd2\xc3\xf4\xcf\x3a\xfa\x67\x3a\x96\x89\x8c\x1a\x8c\x5d\x08\xc7\x8c\x9a\x0e\x63\xe6\x27\xf3\x7c\x56\x9f\xb1\xcf\x0d\x00\xf8\xf0\x66\x7e\x39\x87\xe1\xb3\xcb\x64\xdc\xe4\xe0\xac\x55\x15\x0e\xb9\x63\xaa\x59\x8b\x16\x30\xfb\xbb\x5d\x36\x86\x0f\x87\x19\xe1\x6c\x71\xf9\x7e\x7e\x00\xc7\xef\x5e\xdd\x6f\xb1\x29\x20\xb5\x3d\x26\x12\x5b\xda\x6e\xdc\x63\xa7\x9f\x0a\x7b\xd2\xfa\x34\x13\xd6\xa2\x51\x2e\x95\xb7\xcd\xcc\xcf\x43\x9d\x24\x6e\xc6\x0d\xc7\x4a\xd8\xc9\x67\x07\xa4\xfa\xf9\x5c\xe5\x1c\xcc\xe0\xeb\xf0\x45\xd7\x28\xc3\xe7
\x81\x16\x4b\x99\x06\xa7\xce\x43\x8c\x1d\x6b\xf7\x6a\xb4\xd3\x87\xb5\xe6\x6d\xd7\x87\x33\xc1\x91\x27\xd9\x75\xf1\x7b\xf7\x22\x3d\x65\x76\xa4\x3a\xb1\x6b\xfa\x66\x15\xbb\x8e\x75\x78\xd1\x22\x3a\x59\x23\xd5\x6a\xab\x70\xf0\x43\xe4\xd9\x31\xee\x7b\xe7\xa0\xf5\xe7\xc3\xd6\xbd\xc6\xbd\xe6\x3b\xdb\xaf\x2f\xcf\xdf\x5f\xc0\xcb\xeb\xc1\xdb\x6d\xb3\x8c\xc9\x13\xb5\xd2\xff\xbf\xcd\x9d\x76\xa1\xfa\x97\xdc\x81\x86\x30\x75\xb5\x86\x17\xf0\xec\x41\x14\xfb\x7f\x31\x0c\x7e\x90\x65\xbb\xdf\x70\x91\x3b\xf7\xcf\x1e\x83\x54\x66\x96\xc0\xa0\x88\x37\x6e\x7e\xac\x5f\x46\x1d\xae\x32\x1b\x2b\x8d\x86\xa8\x77\x8c\x06\x2d\xae\xb8\x71\xf4\x93\xdd\xdb\xe2\xea\xe1\x2b\xe4\x91\xeb\x8f\xa7\x7f\xb2\x4e\xbd\x36\xac\x31\xaa\x7b\xef\x88\x65\xf9\x71\x8a\x66\x8f\xe4\xbc\x7e\x06\xe9\x64\xeb\xd9\x14\x46\xf0\xa4\xdd\xac\x9e\x80\x7f\xd2\x8e\xfd\xbf\xd9\x68\xfb\xe8\xe3\x47\x6b\xe4\x58\xfa\xd3\xcd\xe8\x10\x84\x59\xd1\xac\x26\xae\xeb\xc1\x39\x12\x89\xd5\xfe\x39\xa8\xc9\x2e\x16\xf6\xa3\xe8\xb0\x6c\xcb\x1f\x41\x6e\x94\xfc\xb4\xff\x0a\xfa\x09\x59\xb7\xf8\x4f\xd1\xfe\x8b\xee\x4f\x0b\x1d\xbd\x7c\xbf\x5e\x3d\xcc\x75\xe4\xe9\xce\xe9\xae\x23\x9a\xde\x37\xee\x75\xa4\xab\x67\xf8\x9e\xe9\xab\x3f\x75\x75\x54\xf9\xce\xee\x1e\x0c\xbb\x57\xe5\x56\x3a\xf2\xae\x6a\x45\xa9\x90\x03\x65\x97\x3f\xe1\xca\x6e\xb7\x4e\xf7\x46\x80\x9d\xf1\xda\x7f\xcb\x01\xcd\x6e\xfd\x6f\x4f\x23\xbb\x9d\xc0\xe7\x5e\x0f\x3a\xf8\x27\x00\x00\xff\xff\x39\xb3\xeb\x73\x6c\x12\x00\x00") // FileArtifactsDefinitionsWindowsTriageProcessMemoryYaml is "artifacts/definitions/Windows/Triage/ProcessMemory.yaml" var FileArtifactsDefinitionsWindowsTriageProcessMemoryYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x92\xcd\xae\x9b\x30\x10\x85\xf7\x7e\x8a\xb3\xbb\x50\x71\xef\x03\x20\xb1\x4a\x89\xba\xc8\xed\x45\x49\xa4\x2c\x2b\x0b\x0f\xc1\x12\xfe\xe9\xd8\x94\x46\xfd\x79\xf6\x8a\x98\x44\x41\x8a\x7a\x76\xc6\x33\x67\xbe\x39\xd8\x4a\x43\x25\x4e\xda\x2a\x37\x85\xb7\x23\x6b\x79\xa6\xb7\x86\x5d\x4b\x21\xbc\x93\x71\x7c\x11\x8a\x42\xcb\xda\x47\xed\x6c\x89\xdf\x02\xf8\x3c\x1a\x0f\x9f\x6a\x60\xae\x45\x90\x56\x61\xf4\x83\x93\x0a\xd1\x21\xf6\x84\x40\xfc\x83\x58\x08\xcf\xd4\x3a\xab\x74\xea\x3f\xd4\xbb\x7a\x73\xc4\xc7\x01\x5b\x76\x06\xda\x76\x2e\xcb\x31\xf5\xc4\x34\x7f\xac\xf0\x32\x25\x96\x17\x21\xbc\x64\x69\x28\x12\x87\x52\x00\xaf\x48\xac\xcb\xe0\x3d\x9d\xe9\xa7\x00\x00\x45\x9d\x1c\x87\x58\xc2\xba\x48\x5e\x2a\x21\x82\x1b\xb9\xa5\xa5\xeb\xfb\x48\xac\xd3\x61\xd6\xeb\x75\x85\xa4\x5d\x7d\xbc\xd9\x51\x40\x75\x83\xfb\x2a\x0d\x41\x06\x2c\x31\xcc\xc7\x02\x1b\x67\x8c\xb4\x6a\xa7\x2d\x15\x68\xb4\xba\x9b\xcc\xda\xee\x3f\xde\xe1\xc3\xa0\x43\xcc\xf2\xd5\xcd\xe9\x4b\xbd\xaf\x93\x63\xf5\x77\xcd\xfe\x04\x68\x01\xf8\x94\x0c\x3b\xc7\x24\xdb\x3e\x7b\x30\x64\x37\x55\x77\xe2\xe2\xe1\x62\xde\xf2\x52\xfd\x5a\xcd\x5e\xdc\xfe\xbb\x46\x81\xed\x38\x0c\x8d\x8c\x7d\xb1\xea\x5d\x94\x7e\x69\xd6\xe9\x81\xaa\x5b\x61\x3e\x67\xb3\x61\x19\xfa\xf9\x21\x3c\xc9\x81\x5d\xfb\x4d\x8d\xc6\x67\x5e\xab\xaa\xd1\xea\x31\x91\x3f\xb9\xf8\x17\x00\x00\xff\xff\x69\xb4\xc6\x38\x73\x02\x00\x00") diff --git a/bin/artifacts_acquire.go b/bin/artifacts_acquire.go index a8050e15fe6..2613dfc1429 100644 --- a/bin/artifacts_acquire.go +++ b/bin/artifacts_acquire.go @@ -33,8 +33,8 @@ import ( config_proto "www.velocidex.com/golang/velociraptor/config/proto" "www.velocidex.com/golang/velociraptor/file_store/csv" logging "www.velocidex.com/golang/velociraptor/logging" + "www.velocidex.com/golang/velociraptor/uploads" vql_subsystem "www.velocidex.com/golang/velociraptor/vql" - vql_networking "www.velocidex.com/golang/velociraptor/vql/networking" vfilter "www.velocidex.com/golang/vfilter" ) @@ -75,7 +75,7 
@@ func acquireArtifact(ctx context.Context, config_obj *config_proto.Config, env := ordereddict.NewDict(). Set("config", config_obj.Client). Set("server_config", config_obj). - Set("$uploader", &vql_networking.FileBasedUploader{ + Set("$uploader", &uploads.FileBasedUploader{ UploadDir: filepath.Join(subdir, "files"), }). Set(vql_subsystem.ACL_MANAGER_VAR, acl_manager). diff --git a/bin/console.go b/bin/console.go index a813458d947..e0c0fdba567 100644 --- a/bin/console.go +++ b/bin/console.go @@ -35,9 +35,9 @@ import ( artifacts "www.velocidex.com/golang/velociraptor/artifacts" config_proto "www.velocidex.com/golang/velociraptor/config/proto" "www.velocidex.com/golang/velociraptor/reporting" + "www.velocidex.com/golang/velociraptor/uploads" "www.velocidex.com/golang/velociraptor/utils" vql_subsystem "www.velocidex.com/golang/velociraptor/vql" - vql_networking "www.velocidex.com/golang/velociraptor/vql/networking" vfilter "www.velocidex.com/golang/vfilter" ) @@ -601,7 +601,7 @@ func doConsole() { env := ordereddict.NewDict(). Set("config", config_obj.Client). Set("server_config", config_obj). - Set("$uploader", &vql_networking.FileBasedUploader{ + Set("$uploader", &uploads.FileBasedUploader{ UploadDir: *console_dump_dir, }). Set(vql_subsystem.ACL_MANAGER_VAR, acl_manager). 
diff --git a/bin/fs.go b/bin/fs.go index 9378899803c..4d0769d574a 100644 --- a/bin/fs.go +++ b/bin/fs.go @@ -18,38 +18,51 @@ package main import ( + "context" + "io" "os" + "path/filepath" + "regexp" "strings" "github.com/Velocidex/ordereddict" kingpin "gopkg.in/alecthomas/kingpin.v2" + "www.velocidex.com/golang/velociraptor/file_store" + "www.velocidex.com/golang/velociraptor/glob" "www.velocidex.com/golang/velociraptor/reporting" + "www.velocidex.com/golang/velociraptor/uploads" vql_subsystem "www.velocidex.com/golang/velociraptor/vql" - vql_networking "www.velocidex.com/golang/velociraptor/vql/networking" vfilter "www.velocidex.com/golang/vfilter" ) var ( + accessor_reg = regexp.MustCompile( + "^(file|ntft|reg|registry|zip|raw_reg|lazy_ntfs|file_links|fs)://(.+)$") + fs_command = app.Command("fs", "Run filesystem commands.") fs_command_accessor = fs_command.Flag( - "accessor", "The FS accessor to use").Default("file").Enum( - "file", "ntfs", "reg", "zip", "raw_reg", "lazy_ntfs", "file_links") + "accessor", "The FS accessor to use").Default("file").String() fs_command_verbose = fs_command.Flag( - "details", "Show more verbose info").Short('d'). + "details", "Show more verbose info").Short('l'). Default("false").Bool() - fs_command_format = fs_command.Flag("format", "Output format to use (text,json,jsonl)."). - Default("text").Enum("text", "json", "jsonl") + fs_command_format = fs_command.Flag("format", "Output format to use (text,json,jsonl,csv)."). 
+ Default("jsonl").Enum("text", "json", "jsonl", "csv") fs_command_ls = fs_command.Command("ls", "List files") fs_command_ls_path = fs_command_ls.Arg( - "path", "The path to list").Default("/").String() + "path", "The path or glob to list").Default("/").String() fs_command_cp = fs_command.Command("cp", "Copy files to a directory.") fs_command_cp_path = fs_command_cp.Arg( - "path", "The path to list").Default("/").String() + "path", "The path or glob to list").Required().String() + fs_command_cp_outdir = fs_command_cp.Arg( - "dumpdir", "The directory to store files at.").Default("."). - ExistingDir() + "dumpdir", "The directory to store files at.").Required(). + String() + + fs_command_cat = fs_command.Command("cat", "Dump a file to the terminal") + fs_command_cat_path = fs_command_cat.Arg( + "path", "The path to cat").Required().String() ) func eval_query(query string, scope *vfilter.Scope) { @@ -73,7 +86,15 @@ func eval_query(query string, scope *vfilter.Scope) { } } -func doLS(path string) { +func doLS(path, accessor string) { + initFilestoreAccessor() + + matches := accessor_reg.FindStringSubmatch(path) + if matches != nil { + accessor = matches[1] + path = matches[2] + } + if len(path) > 0 && (path[len(path)-1] == '/' || path[len(path)-1] == '\\') { path += "*" @@ -82,7 +103,7 @@ func doLS(path string) { env := ordereddict.NewDict(). Set(vql_subsystem.ACL_MANAGER_VAR, vql_subsystem.NewRoleACLManager("administrator")). - Set("accessor", *fs_command_accessor). + Set("accessor", accessor). 
Set("path", path) scope := vql_subsystem.MakeScope().AppendVars(env) @@ -90,61 +111,131 @@ func doLS(path string) { AddLogger(scope, get_config_or_default()) - query := "SELECT Name, Size, Mode.String AS Mode, " + - "timestamp(epoch=Mtime.Sec) as mtime, Data " + + query := "SELECT Name, Size, Mode.String AS Mode, Mtime, Data " + "FROM glob(globs=path, accessor=accessor) " if *fs_command_verbose { query = strings.Replace(query, "Name", "FullPath", 1) } + // Special handling for ntfs. - if !*fs_command_verbose && *fs_command_accessor == "ntfs" { + if !*fs_command_verbose && accessor == "ntfs" { query += " WHERE Sys.name_type != 'DOS' " } eval_query(query, scope) } -func doCp(path string, dump_dir string) { +func doCp(path, accessor string, dump_dir string) { + initFilestoreAccessor() + config_obj := get_config_or_default() + + matches := accessor_reg.FindStringSubmatch(path) + if matches != nil { + accessor = matches[1] + path = matches[2] + } + if len(path) > 0 && (path[len(path)-1] == '/' || path[len(path)-1] == '\\') { path += "*" } + if accessor == "file" { + path, _ = filepath.Abs(path) + } + + output_accessor := "" + output_path := dump_dir + + matches = accessor_reg.FindStringSubmatch(dump_dir) + if matches != nil { + output_accessor = matches[1] + output_path = matches[2] + } + env := ordereddict.NewDict(). - Set("accessor", *fs_command_accessor). + Set("accessor", accessor). Set("path", path). - Set("$uploader", &vql_networking.FileBasedUploader{ - UploadDir: dump_dir, - }). Set(vql_subsystem.ACL_MANAGER_VAR, vql_subsystem.NewRoleACLManager("administrator")). 
Set(vql_subsystem.CACHE_VAR, vql_subsystem.NewScopeCache()) + switch output_accessor { + case "", "file": + env.Set("$uploader", &uploads.FileBasedUploader{ + UploadDir: output_path, + }) + + case "fs": + uploader := file_store.NewFileStoreUploader( + config_obj, output_path) + env.Set("$uploader", uploader) + + default: + kingpin.Fatalf("Can not write to accessor %v\n", output_accessor) + } + scope := vql_subsystem.MakeScope().AppendVars(env) defer scope.Close() AddLogger(scope, get_config_or_default()) - eval_query(`SELECT * from foreach( + scope.Log("Copy from %v (%v) to %v (%v)", + path, accessor, output_path, output_accessor) + + eval_query(` +SELECT * from foreach( row={ SELECT Name, Size, Mode.String AS Mode, - timestamp(epoch=Mtime.Sec) as mtime, Data, FullPath + Mtime, Data, FullPath FROM glob(globs=path, accessor=accessor) - WHERE Sys.name_type != 'DOS' }, query={ - SELECT Name, Size, Mode, mtime, Data, - upload(file=FullPath, accessor=accessor) AS Upload + SELECT Name, Size, Mode, Mtime, Data, + upload(file=FullPath, accessor=accessor, name=Name) AS Upload FROM scope() })`, scope) } +func initFilestoreAccessor() { + config_obj, err := get_server_config(*config_path) + if err != nil { + return + } + + accessor, err := file_store.GetFileStoreFileSystemAccessor(config_obj) + kingpin.FatalIfError(err, "GetFileStoreFileSystemAccessor") + glob.Register("fs", accessor) +} + +func doCat(path, accessor_name string) { + initFilestoreAccessor() + matches := accessor_reg.FindStringSubmatch(path) + if matches != nil { + accessor_name = matches[1] + path = matches[2] + } + + ctx := context.Background() + accessor, err := glob.GetAccessor(accessor_name, ctx) + kingpin.FatalIfError(err, "GetAccessor") + + fd, err := accessor.Open(path) + kingpin.FatalIfError(err, "ReadFile") + + io.Copy(os.Stdout, fd) +} + func init() { command_handlers = append(command_handlers, func(command string) bool { switch command { - case "fs ls": - doLS(*fs_command_ls_path) - case "fs cp": - 
doCp(*fs_command_cp_path, *fs_command_cp_outdir) + case fs_command_ls.FullCommand(): + doLS(*fs_command_ls_path, *fs_command_accessor) + + case fs_command_cp.FullCommand(): + doCp(*fs_command_cp_path, *fs_command_accessor, *fs_command_cp_outdir) + + case fs_command_cat.FullCommand(): + doCat(*fs_command_cat_path, *fs_command_accessor) default: return false diff --git a/bin/query.go b/bin/query.go index 2dd47bfd92a..1edc6e4b8e5 100644 --- a/bin/query.go +++ b/bin/query.go @@ -29,8 +29,8 @@ import ( artifacts "www.velocidex.com/golang/velociraptor/artifacts" "www.velocidex.com/golang/velociraptor/file_store/csv" "www.velocidex.com/golang/velociraptor/reporting" + "www.velocidex.com/golang/velociraptor/uploads" vql_subsystem "www.velocidex.com/golang/velociraptor/vql" - vql_networking "www.velocidex.com/golang/velociraptor/vql/networking" "www.velocidex.com/golang/vfilter" ) @@ -158,7 +158,7 @@ func doQuery() { env := ordereddict.NewDict(). Set("config", config_obj.Client). Set("server_config", config_obj). - Set("$uploader", &vql_networking.FileBasedUploader{ + Set("$uploader", &uploads.FileBasedUploader{ UploadDir: *dump_dir, }). 
diff --git a/bin/unzip.go b/bin/unzip.go index c8bd77d342c..156d4fc182a 100644 --- a/bin/unzip.go +++ b/bin/unzip.go @@ -7,8 +7,8 @@ import ( "github.com/Velocidex/ordereddict" kingpin "gopkg.in/alecthomas/kingpin.v2" "www.velocidex.com/golang/velociraptor/reporting" + "www.velocidex.com/golang/velociraptor/uploads" vql_subsystem "www.velocidex.com/golang/velociraptor/vql" - vql_networking "www.velocidex.com/golang/velociraptor/vql/networking" "www.velocidex.com/golang/vfilter" ) @@ -78,7 +78,7 @@ func doUnzip() { } } else { - env.Set("$uploader", &vql_networking.FileBasedUploader{ + env.Set("$uploader", &uploads.FileBasedUploader{ UploadDir: *unzip_path, }) diff --git a/file_store/accessor.go b/file_store/accessor.go deleted file mode 100644 index 35c4d4e10eb..00000000000 --- a/file_store/accessor.go +++ /dev/null @@ -1,147 +0,0 @@ -package file_store - -// This implements a filesystem accessor which can be used to access -// the filestore. This allows us to run globs on the file store -// regardless of the specific filestore implementation. 
- -import ( - "context" - "encoding/json" - "os" - "path" - "path/filepath" - "regexp" - "time" - - config_proto "www.velocidex.com/golang/velociraptor/config/proto" - "www.velocidex.com/golang/velociraptor/glob" -) - -func (self *FileStoreFileInfo) Data() interface{} { - return self._data -} - -func (self *FileStoreFileInfo) FullPath() string { - return self._full_path -} - -func (self *FileStoreFileInfo) Mtime() glob.TimeVal { - return glob.TimeVal{} -} - -func (self *FileStoreFileInfo) Ctime() glob.TimeVal { - return glob.TimeVal{} -} - -func (self *FileStoreFileInfo) Atime() glob.TimeVal { - return glob.TimeVal{} -} - -func (self *FileStoreFileInfo) IsLink() bool { - return self.Mode()&os.ModeSymlink != 0 -} - -func (self *FileStoreFileInfo) GetLink() (string, error) { - target, err := os.Readlink(self._full_path) - if err != nil { - return "", err - } - return target, nil -} - -func (self *FileStoreFileInfo) MarshalJSON() ([]byte, error) { - result, err := json.Marshal(&struct { - FullPath string - Size int64 - Mode os.FileMode - ModeStr string - ModTime time.Time - Sys interface{} - Mtime glob.TimeVal - Ctime glob.TimeVal - Atime glob.TimeVal - }{ - FullPath: self.FullPath(), - Size: self.Size(), - Mode: self.Mode(), - ModeStr: self.Mode().String(), - ModTime: self.ModTime(), - Sys: self.Sys(), - Mtime: self.Mtime(), - Ctime: self.Ctime(), - Atime: self.Atime(), - }) - - return result, err -} - -func (self *FileStoreFileInfo) UnmarshalJSON(data []byte) error { - return nil -} - -type FileStoreFileSystemAccessor struct { - file_store FileStore -} - -func (self FileStoreFileSystemAccessor) New(ctx context.Context) glob.FileSystemAccessor { - return &FileStoreFileSystemAccessor{self.file_store} -} - -func (self FileStoreFileSystemAccessor) Lstat(filename string) (glob.FileInfo, error) { - lstat, err := self.file_store.StatFile(filename) - if err != nil { - return nil, err - } - - return &FileStoreFileInfo{lstat, filename, nil}, nil -} - -func (self 
FileStoreFileSystemAccessor) ReadDir(path string) ([]glob.FileInfo, error) { - files, err := self.file_store.ListDirectory(path) - if err != nil { - return nil, err - } - - var result []glob.FileInfo - for _, f := range files { - result = append(result, - &FileStoreFileInfo{f, filepath.Join(path, f.Name()), nil}) - } - - return result, nil -} - -func (self FileStoreFileSystemAccessor) Open(path string) (glob.ReadSeekCloser, error) { - file, err := self.file_store.ReadFile(path) - if err != nil { - return nil, err - } - - return file, nil -} - -var FileStoreFileSystemAccessor_re = regexp.MustCompile("/") - -func (self FileStoreFileSystemAccessor) PathSplit(path string) []string { - return FileStoreFileSystemAccessor_re.Split(path, -1) -} - -func (self FileStoreFileSystemAccessor) PathJoin(root, stem string) string { - return path.Join(root, stem) -} - -func (self *FileStoreFileSystemAccessor) GetRoot(path string) (string, string, error) { - return "/", path, nil -} - -func GetFileStoreFileSystemAccessor( - config_obj *config_proto.Config) *FileStoreFileSystemAccessor { - if config_obj.Datastore.Implementation == "MySQL" { - datastore, err := NewSqlFileStore(config_obj) - if err != nil { - return nil - } - return &FileStoreFileSystemAccessor{datastore} - } - return &FileStoreFileSystemAccessor{&DirectoryFileStore{config_obj}} -} diff --git a/file_store/adapter.go b/file_store/adapter.go new file mode 100644 index 00000000000..b310ce70773 --- /dev/null +++ b/file_store/adapter.go @@ -0,0 +1,75 @@ +package file_store + +import ( + "os" + + "github.com/pkg/errors" + "www.velocidex.com/golang/velociraptor/glob" +) + +// Implement the glob.FileInfo +type FileInfoAdapter struct { + os.FileInfo + + full_path string + _data interface{} +} + +func NewFileInfoAdapter(fd os.FileInfo, full_path string, data interface{}) *FileInfoAdapter { + return &FileInfoAdapter{ + FileInfo: fd, + full_path: full_path, + _data: data, + } +} + +func (self FileInfoAdapter) Data() interface{} { + 
return self._data +} + +func (self FileInfoAdapter) FullPath() string { + return self.full_path +} + +func (self FileInfoAdapter) Mtime() glob.TimeVal { + return glob.TimeVal{} +} + +func (self FileInfoAdapter) Atime() glob.TimeVal { + return glob.TimeVal{} +} + +func (self FileInfoAdapter) Ctime() glob.TimeVal { + return glob.TimeVal{} +} + +func (self FileInfoAdapter) IsLink() bool { + return self.Mode()&os.ModeSymlink != 0 +} + +func (self FileInfoAdapter) GetLink() (string, error) { + return "", errors.New("Not implemented") +} + +type FileAdapter struct { + *os.File + + FullPath string +} + +func (self *FileAdapter) Stat() (glob.FileInfo, error) { + stat, err := self.File.Stat() + if err != nil { + return nil, err + } + return NewFileInfoAdapter(stat, self.FullPath, nil), nil +} + +type FileReaderAdapter struct { + FileReader +} + +func (self *FileReaderAdapter) Stat() (os.FileInfo, error) { + stat, err := self.FileReader.Stat() + return stat, err +} diff --git a/file_store/compress.go b/file_store/compress.go index 0f08df55e05..46254871b50 100644 --- a/file_store/compress.go +++ b/file_store/compress.go @@ -21,12 +21,16 @@ import ( "fmt" "io" "os" + + "www.velocidex.com/golang/velociraptor/glob" ) // GzipReader is a FileReader from compressed files. 
type GzipReader struct { zip_fd io.Reader backing_file *os.File + + full_path string } func (self *GzipReader) Read(buff []byte) (int, error) { @@ -46,8 +50,12 @@ func (self *GzipReader) Seek(offset int64, whence int) (int64, error) { offset, whence) } -func (self GzipReader) Stat() (os.FileInfo, error) { - return self.backing_file.Stat() +func (self GzipReader) Stat() (glob.FileInfo, error) { + stat, err := self.backing_file.Stat() + if err != nil { + return nil, err + } + return NewFileInfoAdapter(stat, self.full_path, nil), nil } func (self *GzipReader) Close() error { diff --git a/file_store/directory.go b/file_store/directory.go new file mode 100644 index 00000000000..8e71547787b --- /dev/null +++ b/file_store/directory.go @@ -0,0 +1,236 @@ +// This is an implementation of the file store based on single files +// and directories on the filesystem. + +// It is very fast but can not be shared between multiple hosts since +// the filesystem must be locally accessible. A remote filesystem, +// such as NFS might work but this configuration is not tested nor +// supported. + +package file_store + +/* + + This file store implementation stores files on disk. All of these + functions receive serialized Velociraptor's VFS paths. + + Velociraptor paths are a sequence of string components. When the VFS + path is serialized, we join the components using the path separator + (by default /) . If the component contains path separators, they + will be escaped (see utils/path.go). + + There is a direct mapping between VFS paths and filenames on + disk. This mapping is reversible and supports correct round + tripping. + + Use FilenameToFileStorePath() to convert from a serialized VFS path to a disk path + + Calling any of the Filestore methods (ReadDir, Open, Lstat) will + always return VFS paths. 
+*/ + +import ( + "compress/gzip" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + config_proto "www.velocidex.com/golang/velociraptor/config/proto" + "www.velocidex.com/golang/velociraptor/datastore" + logging "www.velocidex.com/golang/velociraptor/logging" + "www.velocidex.com/golang/velociraptor/utils" +) + +var ( + openCounter = promauto.NewCounter(prometheus.CounterOpts{ + Name: "file_store_open", + Help: "Total number of filestore open operations.", + }) + + listCounter = promauto.NewCounter(prometheus.CounterOpts{ + Name: "file_store_list", + Help: "Total number of filestore list children operations.", + }) +) + +const ( + // On windows all file paths must be prefixed by this to + // support long paths. + WINDOWS_LFN_PREFIX = "\\\\?\\" +) + +type DirectoryFileWriter struct { + Fd *os.File +} + +func (self *DirectoryFileWriter) Size() (int64, error) { + return self.Fd.Seek(0, os.SEEK_END) +} + +func (self *DirectoryFileWriter) Write(data []byte) (int, error) { + _, err := self.Fd.Seek(0, os.SEEK_END) + if err != nil { + return 0, err + } + + return self.Fd.Write(data) +} + +func (self *DirectoryFileWriter) Truncate() error { + return self.Fd.Truncate(0) +} + +func (self *DirectoryFileWriter) Close() error { + return self.Fd.Close() +} + +type DirectoryFileStore struct { + config_obj *config_proto.Config +} + +func (self *DirectoryFileStore) ListDirectory(dirname string) ( + []os.FileInfo, error) { + + listCounter.Inc() + + file_path := self.FilenameToFileStorePath(dirname) + files, err := utils.ReadDir(file_path) + if err != nil { + return nil, err + } + + var result []os.FileInfo + for _, fileinfo := range files { + result = append(result, &DirectoryFileStoreFileInfo{ + fileinfo, + utils.PathJoin(dirname, fileinfo.Name(), "/"), + nil}) + } + + return result, nil +} + +func getCompressed(filename string) (FileReader, 
error) { + fd, err := os.Open(filename) + if err != nil { + return nil, errors.WithStack(err) + } + + zr, err := gzip.NewReader(fd) + if err != nil { + return nil, errors.WithStack(err) + } + + return &GzipReader{zr, fd, filename}, nil +} + +func (self *DirectoryFileStore) ReadFile(filename string) (FileReader, error) { + file_path := self.FilenameToFileStorePath(filename) + if strings.HasSuffix(".gz", file_path) { + return getCompressed(file_path) + } + + openCounter.Inc() + file, err := os.Open(file_path) + if os.IsNotExist(err) { + return getCompressed(file_path + ".gz") + } + + if err != nil { + return nil, errors.WithStack(err) + } + return &FileAdapter{file, filename}, nil +} + +func (self *DirectoryFileStore) StatFile(filename string) (os.FileInfo, error) { + file_path := self.FilenameToFileStorePath(filename) + file, err := os.Stat(file_path) + if err != nil { + return nil, err + } + + return &DirectoryFileStoreFileInfo{file, filename, nil}, nil +} + +func (self *DirectoryFileStore) WriteFile(filename string) (FileWriter, error) { + file_path := self.FilenameToFileStorePath(filename) + err := os.MkdirAll(filepath.Dir(file_path), 0700) + if err != nil { + logging.GetLogger(self.config_obj, + &logging.FrontendComponent).Error( + "Can not create dir", err) + return nil, err + } + + openCounter.Inc() + file, err := os.OpenFile(file_path, os.O_RDWR|os.O_CREATE, 0700) + if err != nil { + logging.GetLogger(self.config_obj, &logging.FrontendComponent).Error( + "Unable to open file "+file_path, err) + return nil, errors.WithStack(err) + } + + return &DirectoryFileWriter{file}, nil +} + +func (self *DirectoryFileStore) Delete(filename string) error { + file_path := self.FilenameToFileStorePath(filename) + return os.Remove(file_path) +} + +// In the below: +// Filename: is an abstract filename to be represented in the file store. +// FileStorePath: An actual path to store the file on the filesystem. +// +// On windows, the FileStorePath always includes the LFN prefix. 
+func (self *DirectoryFileStore) FilenameToFileStorePath(filename string) string { + components := []string{self.config_obj.Datastore.FilestoreDirectory} + for _, component := range utils.SplitComponents(filename) { + components = append(components, + string(datastore.SanitizeString(component))) + } + + result := filepath.Join(components...) + if runtime.GOOS == "windows" { + return WINDOWS_LFN_PREFIX + result + } + return result +} + +// Converts from a physical path on disk to a normalized filestore path. +func (self *DirectoryFileStore) FileStorePathToFilename(filename string) ( + string, error) { + + if runtime.GOOS == "windows" { + filename = strings.TrimPrefix(filename, WINDOWS_LFN_PREFIX) + } + filename = strings.TrimPrefix(filename, + self.config_obj.Datastore.FilestoreDirectory) + + components := []string{} + for _, component := range strings.Split( + filename, + string(os.PathSeparator)) { + components = append(components, + string(datastore.UnsanitizeComponent(component))) + } + + result := filepath.Join(components...) + return result, nil +} + +func (self *DirectoryFileStore) Walk(root string, walkFn filepath.WalkFunc) error { + path := self.FilenameToFileStorePath(root) + return filepath.Walk(path, + func(path string, info os.FileInfo, err error) error { + filename, err_1 := self.FileStorePathToFilename(path) + if err_1 != nil { + return err_1 + } + return walkFn(filename, + &DirectoryFileStoreFileInfo{info, path, nil}, err) + }) +} diff --git a/file_store/directory_accessor.go b/file_store/directory_accessor.go new file mode 100644 index 00000000000..9de4f05cfb3 --- /dev/null +++ b/file_store/directory_accessor.go @@ -0,0 +1,148 @@ +package file_store + +// This implements a filesystem accessor which can be used to access +// the filestore. This allows us to run globs on the file store +// regardless of the specific filestore implementation. 
+// This accessor is for DirectoryFileStore + +import ( + "context" + "encoding/json" + "os" + "path" + "path/filepath" + "regexp" + "time" + + "www.velocidex.com/golang/velociraptor/datastore" + "www.velocidex.com/golang/velociraptor/glob" +) + +type DirectoryFileStoreFileInfo struct { + os.FileInfo + _full_path string + _data interface{} +} + +func (self DirectoryFileStoreFileInfo) Name() string { + return datastore.UnsanitizeComponent(self.FileInfo.Name()) +} + +func (self *DirectoryFileStoreFileInfo) Data() interface{} { + return self._data +} + +func (self *DirectoryFileStoreFileInfo) FullPath() string { + return self._full_path +} + +func (self *DirectoryFileStoreFileInfo) Mtime() glob.TimeVal { + return glob.TimeVal{} +} + +func (self *DirectoryFileStoreFileInfo) Ctime() glob.TimeVal { + return glob.TimeVal{} +} + +func (self *DirectoryFileStoreFileInfo) Atime() glob.TimeVal { + return glob.TimeVal{} +} + +func (self *DirectoryFileStoreFileInfo) IsLink() bool { + return self.Mode()&os.ModeSymlink != 0 +} + +func (self *DirectoryFileStoreFileInfo) GetLink() (string, error) { + target, err := os.Readlink(self._full_path) + if err != nil { + return "", err + } + return target, nil +} + +func (self *DirectoryFileStoreFileInfo) MarshalJSON() ([]byte, error) { + result, err := json.Marshal(&struct { + FullPath string + Size int64 + Mode os.FileMode + ModeStr string + ModTime time.Time + Sys interface{} + Mtime glob.TimeVal + Ctime glob.TimeVal + Atime glob.TimeVal + }{ + FullPath: self.FullPath(), + Size: self.Size(), + Mode: self.Mode(), + ModeStr: self.Mode().String(), + ModTime: self.ModTime(), + Sys: self.Sys(), + Mtime: self.Mtime(), + Ctime: self.Ctime(), + Atime: self.Atime(), + }) + + return result, err +} + +func (self *DirectoryFileStoreFileInfo) UnmarshalJSON(data []byte) error { + return nil +} + +type DirectoryFileStoreFileSystemAccessor struct { + file_store *DirectoryFileStore +} + +func (self DirectoryFileStoreFileSystemAccessor) New( + ctx 
context.Context) glob.FileSystemAccessor { + return &DirectoryFileStoreFileSystemAccessor{self.file_store} +} + +func (self DirectoryFileStoreFileSystemAccessor) Lstat( + filename string) (glob.FileInfo, error) { + lstat, err := self.file_store.StatFile(filename) + if err != nil { + return nil, err + } + + return &DirectoryFileStoreFileInfo{lstat, filename, nil}, nil +} + +func (self DirectoryFileStoreFileSystemAccessor) ReadDir(path string) ([]glob.FileInfo, error) { + files, err := self.file_store.ListDirectory(path) + if err != nil { + return nil, err + } + + var result []glob.FileInfo + for _, f := range files { + result = append(result, + &DirectoryFileStoreFileInfo{f, filepath.Join(path, f.Name()), nil}) + } + + return result, nil +} + +func (self DirectoryFileStoreFileSystemAccessor) Open(path string) (glob.ReadSeekCloser, error) { + file, err := self.file_store.ReadFile(path) + if err != nil { + return nil, err + } + + return &FileReaderAdapter{file}, nil +} + +var DirectoryFileStoreFileSystemAccessor_re = regexp.MustCompile("/") + +func (self DirectoryFileStoreFileSystemAccessor) PathSplit(path string) []string { + return DirectoryFileStoreFileSystemAccessor_re.Split(path, -1) +} + +func (self DirectoryFileStoreFileSystemAccessor) PathJoin(root, stem string) string { + return path.Join(root, stem) +} + +func (self *DirectoryFileStoreFileSystemAccessor) GetRoot(path string) (string, string, error) { + return "/", path, nil +} diff --git a/file_store/file_store.go b/file_store/file_store.go index afd8c96a7d4..06780745a51 100644 --- a/file_store/file_store.go +++ b/file_store/file_store.go @@ -15,67 +15,21 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -/* - - This file store implementation stores files on disk. All of these - functions receive serialized Velociraptor's VFS paths. - - Velociraptor paths are a sequence of string components. 
When the VFS - path is serialized, we join the components using the path separator - (by default /) . If the component contains path separators, they - will be escaped (see utils/path.go). - - There is a direct mapping between VFS paths and filenames on - disk. This mapping is reversible and supports correct round - tripping. - - Use FilenameToFileStorePath() to convert from a serialized VFS path to a disk path - - Calling any of the Filestore methods (ReadDir, Open, Lstat) will - always return VFS paths. -*/ package file_store import ( - "compress/gzip" + "fmt" "os" "path/filepath" - "runtime" - "strings" - "sync" - - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" config_proto "www.velocidex.com/golang/velociraptor/config/proto" - "www.velocidex.com/golang/velociraptor/datastore" - logging "www.velocidex.com/golang/velociraptor/logging" - "www.velocidex.com/golang/velociraptor/utils" -) - -var ( - openCounter = promauto.NewCounter(prometheus.CounterOpts{ - Name: "file_store_open", - Help: "Total number of filestore open operations.", - }) - - listCounter = promauto.NewCounter(prometheus.CounterOpts{ - Name: "file_store_list", - Help: "Total number of filestore list children operations.", - }) -) - -const ( - // On windows all file paths must be prefixed by this to - // support long paths. 
- WINDOWS_LFN_PREFIX = "\\\\?\\" + "www.velocidex.com/golang/velociraptor/glob" ) type FileReader interface { Read(buff []byte) (int, error) Seek(offset int64, whence int) (int64, error) - Stat() (os.FileInfo, error) + Stat() (glob.FileInfo, error) Close() error } @@ -93,223 +47,45 @@ type FileWriter interface { type FileStore interface { ReadFile(filename string) (FileReader, error) WriteFile(filename string) (FileWriter, error) - StatFile(filename string) (*FileStoreFileInfo, error) + StatFile(filename string) (os.FileInfo, error) ListDirectory(dirname string) ([]os.FileInfo, error) Walk(root string, cb filepath.WalkFunc) error Delete(filename string) error } -type DirectoryFileWriter struct { - Fd *os.File -} - -func (self *DirectoryFileWriter) Size() (int64, error) { - return self.Fd.Seek(0, os.SEEK_END) -} - -func (self *DirectoryFileWriter) Write(data []byte) (int, error) { - _, err := self.Fd.Seek(0, os.SEEK_END) - if err != nil { - return 0, err - } - - return self.Fd.Write(data) -} - -func (self *DirectoryFileWriter) Truncate() error { - return self.Fd.Truncate(0) -} - -func (self *DirectoryFileWriter) Close() error { - return self.Fd.Close() -} - -type FileStoreFileInfo struct { - os.FileInfo - _full_path string - _data interface{} -} - -func (self FileStoreFileInfo) Name() string { - return datastore.UnsanitizeComponent(self.FileInfo.Name()) -} - -type DirectoryFileStore struct { - config_obj *config_proto.Config -} - -func (self *DirectoryFileStore) ListDirectory(dirname string) ( - []os.FileInfo, error) { - - listCounter.Inc() - - file_path := self.FilenameToFileStorePath(dirname) - files, err := utils.ReadDir(file_path) - if err != nil { - return nil, err - } - - var result []os.FileInfo - for _, fileinfo := range files { - result = append(result, &FileStoreFileInfo{ - fileinfo, - utils.PathJoin(dirname, fileinfo.Name(), "/"), - nil}) - } - - return result, nil -} - -func getCompressed(filename string) (FileReader, error) { - fd, err := 
os.Open(filename) - if err != nil { - return nil, errors.WithStack(err) - } - - zr, err := gzip.NewReader(fd) - if err != nil { - return nil, errors.WithStack(err) - } - - return &GzipReader{zr, fd}, nil -} - -func (self *DirectoryFileStore) ReadFile(filename string) (FileReader, error) { - file_path := self.FilenameToFileStorePath(filename) - if strings.HasSuffix(".gz", file_path) { - return getCompressed(file_path) - } - - openCounter.Inc() - file, err := os.Open(file_path) - if os.IsNotExist(err) { - return getCompressed(file_path + ".gz") - } - - if err != nil { - return nil, errors.WithStack(err) - } - return file, nil -} - -func (self *DirectoryFileStore) StatFile(filename string) (*FileStoreFileInfo, error) { - file_path := self.FilenameToFileStorePath(filename) - file, err := os.Stat(file_path) - if err != nil { - return nil, err - } - - return &FileStoreFileInfo{file, filename, nil}, nil -} - -func (self *DirectoryFileStore) WriteFile(filename string) (FileWriter, error) { - file_path := self.FilenameToFileStorePath(filename) - err := os.MkdirAll(filepath.Dir(file_path), 0700) - if err != nil { - logging.GetLogger(self.config_obj, - &logging.FrontendComponent).Error( - "Can not create dir", err) - return nil, err - } - - openCounter.Inc() - file, err := os.OpenFile(file_path, os.O_RDWR|os.O_CREATE, 0700) - if err != nil { - logging.GetLogger(self.config_obj, &logging.FrontendComponent).Error( - "Unable to open file "+file_path, err) - return nil, errors.WithStack(err) - } - - return &DirectoryFileWriter{file}, nil -} - -func (self *DirectoryFileStore) Delete(filename string) error { - file_path := self.FilenameToFileStorePath(filename) - return os.Remove(file_path) -} - -// In the below: -// Filename: is an abstract filename to be represented in the file store. -// FileStorePath: An actual path to store the file on the filesystem. -// -// On windows, the FileStorePath always includes the LFN prefix. 
-func (self *DirectoryFileStore) FilenameToFileStorePath(filename string) string { - components := []string{self.config_obj.Datastore.FilestoreDirectory} - for _, component := range utils.SplitComponents(filename) { - components = append(components, - string(datastore.SanitizeString(component))) - } - - result := filepath.Join(components...) - if runtime.GOOS == "windows" { - return WINDOWS_LFN_PREFIX + result - } - return result -} - -// Converts from a physical path on disk to a normalized filestore path. -func (self *DirectoryFileStore) FileStorePathToFilename(filename string) ( - string, error) { - - if runtime.GOOS == "windows" { - filename = strings.TrimPrefix(filename, WINDOWS_LFN_PREFIX) - } - filename = strings.TrimPrefix(filename, - self.config_obj.Datastore.FilestoreDirectory) - - components := []string{} - for _, component := range strings.Split( - filename, - string(os.PathSeparator)) { - components = append(components, - string(datastore.UnsanitizeComponent(component))) - } - - result := filepath.Join(components...) - return result, nil -} - -func (self *DirectoryFileStore) Walk(root string, walkFn filepath.WalkFunc) error { - path := self.FilenameToFileStorePath(root) - return filepath.Walk(path, - func(path string, info os.FileInfo, err error) error { - filename, err_1 := self.FileStorePathToFilename(path) - if err_1 != nil { - return err_1 - } - return walkFn(filename, - &FileStoreFileInfo{info, path, nil}, err) - }) -} - -var ( - mu sync.Mutex - implementations map[string]FileStore = make(map[string]FileStore) -) - -// Currently we only support a DirectoryFileStore. +// GetFileStore selects an appropriate FileStore object based on +// config. 
func GetFileStore(config_obj *config_proto.Config) FileStore { - if config_obj.Datastore.Implementation == "Test" { - mu.Lock() - defer mu.Unlock() + switch config_obj.Datastore.Implementation { + case "Test": + return test_memory_file_store - impl, pres := implementations["Test"] - if !pres { - impl = &MemoryFileStore{ - Data: make(map[string][]byte)} - implementations["Test"] = impl + case "MySQL": + res, err := NewSqlFileStore(config_obj) + if err != nil { + panic(err) } + return res + + case "FileBaseDataStore": + return &DirectoryFileStore{config_obj} - return impl + default: + panic(fmt.Sprintf("Unsupported filestore %v", + config_obj.Datastore.Implementation)) } +} +// Gets an accessor that can access the file store. +func GetFileStoreFileSystemAccessor( + config_obj *config_proto.Config) (glob.FileSystemAccessor, error) { if config_obj.Datastore.Implementation == "MySQL" { - res, err := NewSqlFileStore(config_obj) + datastore, err := NewSqlFileStore(config_obj) if err != nil { - panic(err) + return nil, err } - return res + return &SqlFileStoreAccessor{datastore.(*SqlFileStore)}, nil } - - return &DirectoryFileStore{config_obj} + return &DirectoryFileStoreFileSystemAccessor{ + &DirectoryFileStore{config_obj}}, nil } diff --git a/file_store/memory.go b/file_store/memory.go index 40b63cd7254..efc27831d68 100644 --- a/file_store/memory.go +++ b/file_store/memory.go @@ -7,6 +7,13 @@ import ( "sync" "github.com/pkg/errors" + "www.velocidex.com/golang/velociraptor/glob" +) + +var ( + // Only used for tests. 
+ test_memory_file_store *MemoryFileStore = &MemoryFileStore{ + Data: make(map[string][]byte)} ) type MemoryReader struct { @@ -17,7 +24,7 @@ func (self MemoryReader) Close() error { return nil } -func (self MemoryReader) Stat() (os.FileInfo, error) { +func (self MemoryReader) Stat() (glob.FileInfo, error) { return nil, errors.New("Not Implemented") } @@ -82,8 +89,8 @@ func (self *MemoryFileStore) WriteFile(filename string) (FileWriter, error) { }, nil } -func (self *MemoryFileStore) StatFile(filename string) (*FileStoreFileInfo, error) { - return &FileStoreFileInfo{}, nil +func (self *MemoryFileStore) StatFile(filename string) (os.FileInfo, error) { + return &DirectoryFileStoreFileInfo{}, nil } func (self *MemoryFileStore) ListDirectory(dirname string) ([]os.FileInfo, error) { diff --git a/file_store/mysql.go b/file_store/mysql.go index 567abe21d73..89c7cf41c06 100644 --- a/file_store/mysql.go +++ b/file_store/mysql.go @@ -4,6 +4,7 @@ import ( "context" "crypto/sha1" "database/sql" + "encoding/json" "fmt" "io" "os" @@ -26,6 +27,10 @@ var ( set_subject_dir_cache *cache.LRUCache ) +const ( + MAX_BLOB_SIZE = 1<<16 - 1 +) + type _cache_item struct{} func (self _cache_item) Size() int { return 1 } @@ -33,6 +38,8 @@ func (self _cache_item) Size() int { return 1 } type MysqlFileStoreFileInfo struct { path string name string + is_dir bool + size int64 timestamp int64 } @@ -40,8 +47,11 @@ func (self MysqlFileStoreFileInfo) FullPath() string { return path.Join(self.path, self.name) } -func (self MysqlFileStoreFileInfo) Mtime() glob.TimeVal { - return glob.TimeVal{} +func (self *MysqlFileStoreFileInfo) Mtime() glob.TimeVal { + return glob.TimeVal{ + Sec: self.timestamp, + Nsec: self.timestamp * 1000000000, + } } func (self MysqlFileStoreFileInfo) Atime() glob.TimeVal { @@ -49,7 +59,10 @@ func (self MysqlFileStoreFileInfo) Atime() glob.TimeVal { } func (self MysqlFileStoreFileInfo) Ctime() glob.TimeVal { - return glob.TimeVal{} + return glob.TimeVal{ + Sec: self.timestamp, + 
Nsec: self.timestamp * 1000000000, + } } func (self MysqlFileStoreFileInfo) Data() interface{} { @@ -69,11 +82,11 @@ func (self MysqlFileStoreFileInfo) Name() string { } func (self MysqlFileStoreFileInfo) Size() int64 { - return 0 + return self.size } func (self MysqlFileStoreFileInfo) Mode() os.FileMode { - return os.ModeDir + return os.ModeDir | 0777 } func (self MysqlFileStoreFileInfo) ModTime() time.Time { @@ -81,13 +94,46 @@ func (self MysqlFileStoreFileInfo) ModTime() time.Time { } func (self MysqlFileStoreFileInfo) IsDir() bool { - return true + if self.size > 0 { + return false + } + return true //self.is_dir } func (self MysqlFileStoreFileInfo) Sys() interface{} { return nil } +func (self *MysqlFileStoreFileInfo) MarshalJSON() ([]byte, error) { + result, err := json.Marshal(&struct { + FullPath string + Size int64 + Mode os.FileMode + ModeStr string + ModTime time.Time + Sys interface{} + Mtime glob.TimeVal + Ctime glob.TimeVal + Atime glob.TimeVal + }{ + FullPath: self.FullPath(), + Size: self.Size(), + Mode: self.Mode(), + ModeStr: self.Mode().String(), + ModTime: self.ModTime(), + Sys: self.Sys(), + Mtime: self.Mtime(), + Ctime: self.Ctime(), + Atime: self.Atime(), + }) + + return result, err +} + +func (self *MysqlFileStoreFileInfo) UnmarshalJSON(data []byte) error { + return nil +} + type SqlReader struct { config_obj *config_proto.Config @@ -96,6 +142,10 @@ type SqlReader struct { part int64 file_id int64 filename string + + is_dir bool + size int64 + timestamp int64 } // Seek loads a new chunk into the current_chunk buffer and prepares @@ -212,7 +262,17 @@ func (self *SqlReader) Read(buff []byte) (int, error) { return offset, nil } -func (self SqlReader) Stat() (os.FileInfo, error) { +func (self SqlReader) Stat() (glob.FileInfo, error) { + dir_path, name := utils.PathSplit(self.filename) + + return &MysqlFileStoreFileInfo{ + path: dir_path, + name: name, + is_dir: self.is_dir, + size: self.size, + timestamp: self.timestamp, + }, nil + return nil, 
errors.New("Not Implemented") } @@ -232,6 +292,10 @@ func (self SqlWriter) Close() error { } func (self *SqlWriter) Write(buff []byte) (int, error) { + if len(buff) == 0 { + return 0, nil + } + db, err := sql.Open("mysql", self.config_obj.Datastore.MysqlConnectionString) if err != nil { return 0, err @@ -245,44 +309,76 @@ func (self *SqlWriter) Write(buff []byte) (int, error) { return 0, err } + defer tx.Rollback() + + last_part_query, err := tx.Prepare(` +SELECT A.part, A.end_offset FROM filestore AS A join ( + SELECT max(part) AS max_part FROM filestore WHERE id=? +) AS B +ON A.part = B.max_part AND A.id = ?`) + if err != nil { + return 0, err + } + defer last_part_query.Close() + + insert, err := tx.Prepare(`INSERT INTO filestore (id, part, start_offset, end_offset, data) VALUES (?, ?, ?, ?,?)`) + if err != nil { + return 0, err + } + defer insert.Close() + + update_metadata, err := tx.Prepare(`UPDATE filestore_metadata SET timestamp=now(), size=size + ? WHERE id = ?`) + if err != nil { + return 0, err + } + defer update_metadata.Close() + // Append the buffer to the data table end := int64(0) part := int64(0) - length := int64(len(buff)) // Get the last part and end offset. SELECT max() on a primary // key is instant. We then use this part to look up the row // using the all_column index. - err = tx.QueryRow(` -SELECT A.part, A.end_offset FROM filestore AS A join ( - SELECT max(part) AS max_part FROM filestore WHERE id=? 
-) AS B
-ON A.part = B.max_part AND A.id = ?`,
-		self.file_id, self.file_id).Scan(
+	err = last_part_query.QueryRow(self.file_id, self.file_id).Scan(
 		&part, &end)
 
 	// No parts exist yet
 	if err == sql.ErrNoRows {
 		part = 0
 	} else if err != nil {
-		_ = tx.Rollback()
 		return 0, err
 	} else {
 		part += 1
 	}
 
-	_, err = tx.Exec(`
-INSERT INTO filestore (id, part, start_offset, end_offset, data) VALUES (?, ?, ?, ?,?)`,
-		self.file_id, part, end, end+length, buff)
-	if err != nil {
-		_ = tx.Rollback()
-		fmt.Printf("SqlCloserWriter.Write: %v", err)
-		return 0, err
+	// Remember the total write size up front - the loop below
+	// consumes buff as it goes.
+	total_length := int64(len(buff))
+
+	// Push the buffer into the table one chunk at the time.
+	for len(buff) > 0 {
+		// We store the data in blobs which are limited to
+		// 64kb.
+		length := int64(len(buff))
+		if length > MAX_BLOB_SIZE {
+			length = MAX_BLOB_SIZE
+		}
+
+		// Write this chunk only.
+		chunk := buff[:length]
+
+		_, err = insert.Exec(
+			self.file_id, part, end, end+length, chunk)
+		if err != nil {
+			return 0, err
+		}
+
+		// Advance the write offset so consecutive chunks do
+		// not overlap, and increase our size.
+		end += length
+		self.size = end
+
+		// Advance the buffer some more.
+		buff = buff[length:]
+		part += 1
 	}
 
-	_, err = tx.Exec(`UPDATE filestore_metadata SET timestamp=now(), size=size + ? WHERE id = ?`,
-		int64(len(buff)), self.file_id)
+	_, err = update_metadata.Exec(total_length, self.file_id)
 	if err != nil {
-		_ = tx.Rollback()
 		return 0, err
 	}
 
@@ -291,9 +387,6 @@ INSERT INTO filestore (id, part, start_offset, end_offset, data) VALUES (?, ?, ?
 		return 0, err
 	}
 
-	// Increase our size
-	self.size = end + length
-
-	return len(buff), nil
+	// buff has been consumed by the loop - report the original length.
+	return int(total_length), nil
 }
 
@@ -304,11 +397,41 @@ func (self SqlWriter) Truncate() error {
 	}
 	defer db.Close()
 
+	// TODO - retry transaction.
+	ctx := context.Background()
+	tx, err := db.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable})
+	if err != nil {
+		return err
+	}
+
 	// Essentially delete all the filestore rows for this file id.
- _, err = db.Exec("DELETE FROM filestore WHERE id = ?", self.file_id) + _, err = tx.Exec("DELETE FROM filestore WHERE id = ?", self.file_id) + if err != nil { + _ = tx.Rollback() + return err + } + + // Reset the size of the file in the metadata. + _, err = tx.Exec(`UPDATE filestore_metadata SET timestamp=now(), size=0 WHERE id = ?`, + self.file_id) + if err != nil { + _ = tx.Rollback() + return err + } + + err = tx.Commit() + if err != nil { + return err + } + return err } +func hash(path string) string { + hash := sha1.Sum([]byte(path)) + return string(hash[:]) +} + type SqlFileStore struct { mu sync.Mutex @@ -319,45 +442,46 @@ func (self *SqlFileStore) ReadFile(filename string) (FileReader, error) { self.mu.Lock() defer self.mu.Unlock() + result := &SqlReader{ + config_obj: self.config_obj, + filename: filename, + } + db, err := sql.Open("mysql", self.config_obj.Datastore.MysqlConnectionString) if err != nil { return nil, err } defer db.Close() - dir_path := path.Dir(filename) - name := path.Base(filename) - hash := sha1.Sum([]byte(dir_path)) + dir_path, name := utils.PathSplit(filename) // Create the file metadata - var id sql.NullInt64 - err = db.QueryRow(`SELECT id FROM filestore_metadata WHERE path_hash = ? AND name = ?`, - string(hash[:]), name).Scan(&id) - if err != nil { - return nil, err + err = db.QueryRow(` +SELECT id, size, is_dir, unix_timestamp(timestamp) +FROM filestore_metadata WHERE path_hash = ? 
AND name = ?`, + hash(dir_path), name).Scan( + &result.file_id, &result.size, + &result.is_dir, &result.timestamp) + + if err == sql.ErrNoRows { + return nil, errors.New("Not found " + filename) } - if !id.Valid { - return nil, errors.New("Not found") + if err != nil { + return nil, err } - return &SqlReader{ - config_obj: self.config_obj, - file_id: id.Int64, - filename: filename, - }, nil + return result, nil } func makeDirs(components []string, db *sql.DB) error { for len(components) > 0 { dir_path := utils.JoinComponents(components[:len(components)-1], "/") name := components[len(components)-1] - hash := sha1.Sum([]byte(dir_path)) - hash_str := string(hash[:]) _, err := db.Exec(` INSERT IGNORE INTO filestore_metadata (path, path_hash, name) values(?, ?, ?)`, - dir_path, hash_str, name) + dir_path, hash(dir_path), name) if err != nil { return err } @@ -383,8 +507,6 @@ func (self *SqlFileStore) WriteFile(filename string) (FileWriter, error) { if len(components) > 0 { dir_path := utils.JoinComponents(components[:len(components)-1], "/") name := components[len(components)-1] - hash := sha1.Sum([]byte(dir_path)) - hash_str := string(hash[:]) ctx := context.Background() tx, err := db.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable}) @@ -393,12 +515,12 @@ func (self *SqlFileStore) WriteFile(filename string) (FileWriter, error) { } err = tx.QueryRow(`SELECT id, size FROM filestore_metadata WHERE path_hash =? 
and name = ?`, - hash_str, name).Scan(&last_id, &size) + hash(dir_path), name).Scan(&last_id, &size) if err == sql.ErrNoRows { // Create the file metadata res, err := tx.Exec(` INSERT INTO filestore_metadata (path, path_hash, name) values(?, ?, ?)`, - dir_path, string(hash[:]), name) + dir_path, hash(dir_path), name) if err != nil { _ = tx.Rollback() return nil, err @@ -433,8 +555,8 @@ INSERT INTO filestore_metadata (path, path_hash, name) values(?, ?, ?)`, }, nil } -func (self *SqlFileStore) StatFile(filename string) (*FileStoreFileInfo, error) { - return &FileStoreFileInfo{}, nil +func (self *SqlFileStore) StatFile(filename string) (os.FileInfo, error) { + return nil, errors.New("Not Implemented") } func (self *SqlFileStore) ListDirectory(dirname string) ([]os.FileInfo, error) { @@ -447,18 +569,19 @@ func (self *SqlFileStore) ListDirectory(dirname string) ([]os.FileInfo, error) { components := utils.SplitComponents(dirname) dir_name := utils.JoinComponents(components, "/") - hash := sha1.Sum([]byte(dir_name)) rows, err := db.Query(` -SELECT path, name, unix_timestamp(timestamp) FROM filestore_metadata -WHERE path_hash = ? AND path = ?`, string(hash[:]), dir_name) +SELECT path, name, unix_timestamp(timestamp), size, is_dir +FROM filestore_metadata +WHERE path_hash = ? 
AND path = ?`, hash(dir_name), dir_name) if err != nil { return nil, err } for rows.Next() { row := &MysqlFileStoreFileInfo{} - err = rows.Scan(&row.path, &row.name, &row.timestamp) + err = rows.Scan(&row.path, &row.name, &row.timestamp, &row.size, + &row.is_dir) if err != nil { return nil, err } @@ -516,7 +639,9 @@ func NewSqlFileStore(config_obj *config_proto.Config) (FileStore, error) { defer my_sql_mu.Unlock() if !db_initialized { - err := initializeDatabase(config_obj) + err := initializeDatabase( + config_obj, config_obj.Datastore.MysqlConnectionString, + config_obj.Datastore.MysqlDatabase) if err != nil { return nil, err } @@ -527,9 +652,10 @@ func NewSqlFileStore(config_obj *config_proto.Config) (FileStore, error) { } func initializeDatabase( - config_obj *config_proto.Config) error { + config_obj *config_proto.Config, + database_connection_string, database string) error { - db, err := sql.Open("mysql", config_obj.Datastore.MysqlConnectionString) + db, err := sql.Open("mysql", database_connection_string) if err != nil { return err } @@ -537,7 +663,7 @@ func initializeDatabase( // If specifying the connection string we assume the database // already exists. - if config_obj.Datastore.MysqlDatabase != "" { + if database != "" { // If the database does not exist we need to connect // to a blank database to issue the create database. 
conn_string := fmt.Sprintf("%s:%s@tcp(%s)/", @@ -551,7 +677,7 @@ func initializeDatabase( defer db.Close() _, err = db.Exec(fmt.Sprintf("create database if not exists `%v`", - config_obj.Datastore.MysqlDatabase)) + database)) if err != nil { return err } @@ -577,6 +703,7 @@ func initializeDatabase( name varchar(256), timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, size int NOT NULL DEFAULT 0, + is_dir bool NOT NULL DEFAULT false, PRIMARY KEY (id), INDEX(path_hash(20)), unique INDEX(path_hash(20), name))`) @@ -588,23 +715,28 @@ func initializeDatabase( } type SqlFileStoreAccessor struct { - file_store FileStore + file_store *SqlFileStore } func (self SqlFileStoreAccessor) New(ctx context.Context) glob.FileSystemAccessor { return &SqlFileStoreAccessor{self.file_store} } -func (self SqlFileStoreAccessor) Lstat(filename string) (glob.FileInfo, error) { +func (self *SqlFileStoreAccessor) Lstat(filename string) (glob.FileInfo, error) { lstat, err := self.file_store.StatFile(filename) if err != nil { return nil, err } - return lstat, err + return &MysqlFileStoreFileInfo{path: filename, + name: lstat.Name(), + is_dir: lstat.IsDir(), + size: lstat.Size(), + timestamp: lstat.ModTime().Unix(), + }, err } -func (self SqlFileStoreAccessor) ReadDir(path string) ([]glob.FileInfo, error) { +func (self *SqlFileStoreAccessor) ReadDir(path string) ([]glob.FileInfo, error) { files, err := self.file_store.ListDirectory(path) if err != nil { return nil, err @@ -617,8 +749,12 @@ func (self SqlFileStoreAccessor) ReadDir(path string) ([]glob.FileInfo, error) { return result, nil } -func (self SqlFileStoreAccessor) Open(path string) (glob.ReadSeekCloser, error) { - return self.file_store.ReadFile(path) +func (self *SqlFileStoreAccessor) Open(path string) (glob.ReadSeekCloser, error) { + fd, err := self.file_store.ReadFile(path) + if err != nil { + return nil, err + } + return &FileReaderAdapter{fd}, nil } var SqlFileStoreAccessor_re = regexp.MustCompile("/") diff 
--git a/file_store/mysql_test.go b/file_store/mysql_test.go index 0170d254bc3..d4f24dde437 100644 --- a/file_store/mysql_test.go +++ b/file_store/mysql_test.go @@ -31,21 +31,21 @@ func (self *MysqlTestSuite) SetupTest() { // Make sure our database is not the same as the datastore // tests or else we will trash over them. - self.config_obj.Datastore.MysqlDatabase += "fs" + database := self.config_obj.Datastore.MysqlDatabase + "fs" db, err := sql.Open("mysql", conn_string) assert.NoError(self.T(), err) + defer db.Close() - _, err = db.Exec(fmt.Sprintf("drop database `%v`", - self.config_obj.Datastore.MysqlDatabase)) + err = db.Ping() if err != nil { self.T().Skipf("Unable to contact mysql - skipping: %v", err) return } - defer db.Close() + db.Exec(fmt.Sprintf("drop database `%v`", database)) - initializeDatabase(self.config_obj) + initializeDatabase(self.config_obj, conn_string+database, database) self.filestore, err = NewSqlFileStore(self.config_obj) assert.NoError(self.T(), err) diff --git a/file_store/uploader.go b/file_store/uploader.go new file mode 100644 index 00000000000..1597e25d7da --- /dev/null +++ b/file_store/uploader.go @@ -0,0 +1,85 @@ +package file_store + +import ( + "context" + "crypto/md5" + "crypto/sha256" + "encoding/hex" + "io" + "path" + + config_proto "www.velocidex.com/golang/velociraptor/config/proto" + "www.velocidex.com/golang/velociraptor/uploads" + "www.velocidex.com/golang/vfilter" +) + +// An uploader into the filestore. 
+type FileStoreUploader struct {
+	file_store FileStore
+	root_path  string
+}
+
+func (self *FileStoreUploader) Upload(
+	ctx context.Context,
+	scope *vfilter.Scope,
+	filename string,
+	accessor string,
+	store_as_name string,
+	expected_size int64,
+	reader io.Reader) (
+	*uploads.UploadResponse, error) {
+
+	if store_as_name == "" {
+		store_as_name = filename
+	}
+
+	output_path := path.Join(self.root_path, store_as_name)
+
+	out_fd, err := self.file_store.WriteFile(output_path)
+	if err != nil {
+		scope.Log("Unable to open file %s: %v", store_as_name, err)
+		return nil, err
+	}
+	defer out_fd.Close()
+
+	err = out_fd.Truncate()
+	if err != nil {
+		scope.Log("Unable to truncate file %s: %v", store_as_name, err)
+		return nil, err
+	}
+
+	buf := make([]byte, 1024*1024)
+	offset := int64(0)
+	md5_sum := md5.New()
+	sha_sum := sha256.New()
+
+	for {
+		n, read_err := reader.Read(buf)
+		if n == 0 {
+			// Surface genuine read errors instead of
+			// silently truncating the upload.
+			if read_err != nil && read_err != io.EOF {
+				return nil, read_err
+			}
+			break
+		}
+		data := buf[:n]
+
+		// A failed write into the file store must fail the
+		// upload - do not discard the error.
+		_, err = out_fd.Write(data)
+		if err != nil {
+			return nil, err
+		}
+		md5_sum.Write(data)
+		sha_sum.Write(data)
+
+		offset += int64(n)
+	}
+
+	scope.Log("Uploaded %v (%v bytes)", output_path, offset)
+	return &uploads.UploadResponse{
+		Path:   output_path,
+		Size:   uint64(offset),
+		Sha256: hex.EncodeToString(sha_sum.Sum(nil)),
+		Md5:    hex.EncodeToString(md5_sum.Sum(nil)),
+	}, nil
+}
+
+func NewFileStoreUploader(
+	config_obj *config_proto.Config,
+	root_path string) *FileStoreUploader {
+
+	fs := GetFileStore(config_obj)
+	return &FileStoreUploader{fs, root_path}
+}
diff --git a/glob/glob.go b/glob/glob.go
index 7a14a4d3ab5..00511bfbc84 100644
--- a/glob/glob.go
+++ b/glob/glob.go
@@ -28,6 +28,8 @@ import (
 	"strings"
 	"time"
 
+	config_proto "www.velocidex.com/golang/velociraptor/config/proto"
+	"www.velocidex.com/golang/velociraptor/logging"
 	"www.velocidex.com/golang/velociraptor/utils"
 )
 
@@ -263,7 +265,9 @@ func is_dir_or_link(f FileInfo, accessor FileSystemAccessor, depth int) bool {
 // version uses a context to allow cancellation. We write the FileInfo
 // into the output channel.
func (self Globber) ExpandWithContext( - ctx context.Context, root string, + ctx context.Context, + config_obj *config_proto.Config, + root string, accessor FileSystemAccessor) <-chan FileInfo { output_chan := make(chan FileInfo) @@ -281,6 +285,8 @@ func (self Globber) ExpandWithContext( // level. files, err := accessor.ReadDir(root) if err != nil { + logging.GetLogger(config_obj, &logging.GenericComponent). + Debug("Globber.ExpandWithContext: %v", err) return } @@ -338,7 +344,7 @@ func (self Globber) ExpandWithContext( continue } for f := range next.ExpandWithContext( - ctx, next_path, accessor) { + ctx, config_obj, next_path, accessor) { output_chan <- f } } diff --git a/glob/glob_test.go b/glob/glob_test.go index 6b2490cd2dd..eb899445028 100644 --- a/glob/glob_test.go +++ b/glob/glob_test.go @@ -32,6 +32,7 @@ import ( "time" "github.com/sebdah/goldie" + "www.velocidex.com/golang/velociraptor/config" "www.velocidex.com/golang/velociraptor/utils" ) @@ -228,7 +229,8 @@ func TestGlobWithContext(t *testing.T) { } output_chan := globber.ExpandWithContext( - ctx, "/", fs_accessor) + ctx, config.GetDefaultConfig(), + "/", fs_accessor) for row := range output_chan { returned = append(returned, row.FullPath()) } diff --git a/logging/logging.go b/logging/logging.go index 666895c49e4..a0cf5bc6e2d 100644 --- a/logging/logging.go +++ b/logging/logging.go @@ -84,7 +84,8 @@ func (self *LogManager) GetLogger( self.mu.Lock() defer self.mu.Unlock() - if !config_obj.Logging.SeparateLogsPerComponent { + if config_obj.Logging != nil && + !config_obj.Logging.SeparateLogsPerComponent { component = &GenericComponent } @@ -97,7 +98,8 @@ func (self *LogManager) GetLogger( &ClientComponent, &GUIComponent, &APICmponent: logger := self.makeNewComponent(config_obj, component) - if config_obj.Logging.SeparateLogsPerComponent { + if config_obj.Logging != nil && + config_obj.Logging.SeparateLogsPerComponent { self.contexts[component] = logger return logger } else { @@ -150,7 +152,8 @@ func (self 
*LogManager) makeNewComponent( Log.Out = ioutil.Discard Log.Level = logrus.DebugLevel - if config_obj.Logging.OutputDirectory != "" { + if config_obj.Logging != nil && + config_obj.Logging.OutputDirectory != "" { err := os.MkdirAll(config_obj.Logging.OutputDirectory, 0700) if err != nil { panic("Unable to create logging directory.") diff --git a/reporting/container.go b/reporting/container.go index c5405f0dc1f..d3fb7af0570 100644 --- a/reporting/container.go +++ b/reporting/container.go @@ -19,8 +19,8 @@ import ( actions_proto "www.velocidex.com/golang/velociraptor/actions/proto" config_proto "www.velocidex.com/golang/velociraptor/config/proto" "www.velocidex.com/golang/velociraptor/file_store/csv" + "www.velocidex.com/golang/velociraptor/uploads" "www.velocidex.com/golang/velociraptor/utils" - vql_networking "www.velocidex.com/golang/velociraptor/vql/networking" "www.velocidex.com/golang/vfilter" ) @@ -195,7 +195,7 @@ func (self *Container) Upload( accessor string, store_as_name string, expected_size int64, - reader io.Reader) (*vql_networking.UploadResponse, error) { + reader io.Reader) (*uploads.UploadResponse, error) { self.Lock() defer self.Unlock() @@ -229,12 +229,12 @@ func (self *Container) Upload( n, err := utils.Copy(ctx, utils.NewTee(writer, sha_sum, md5_sum), reader) if err != nil { - return &vql_networking.UploadResponse{ + return &uploads.UploadResponse{ Error: err.Error(), }, err } - return &vql_networking.UploadResponse{ + return &uploads.UploadResponse{ Path: sanitized_name, Size: uint64(n), Sha256: hex.EncodeToString(sha_sum.Sum(nil)), diff --git a/uploads/api.go b/uploads/api.go new file mode 100644 index 00000000000..05b00d0a204 --- /dev/null +++ b/uploads/api.go @@ -0,0 +1,29 @@ +// Uploaders deliver files from accessors to the server (or another target). +package uploads + +import ( + "context" + "io" + + "www.velocidex.com/golang/vfilter" +) + +// Returned as the result of the query. 
+type UploadResponse struct { + Path string `json:"Path"` + Size uint64 `json:"Size"` + Error string `json:"Error,omitempty"` + Sha256 string `json:"sha256,omitempty"` + Md5 string `json:"md5,omitempty"` +} + +// Provide an uploader capable of uploading any reader object. +type Uploader interface { + Upload(ctx context.Context, + scope *vfilter.Scope, + filename string, + accessor string, + store_as_name string, + expected_size int64, + reader io.Reader) (*UploadResponse, error) +} diff --git a/uploads/client_uploader.go b/uploads/client_uploader.go new file mode 100644 index 00000000000..0327d2c7278 --- /dev/null +++ b/uploads/client_uploader.go @@ -0,0 +1,90 @@ +package uploads + +import ( + "context" + "crypto/md5" + "crypto/sha256" + "encoding/hex" + "errors" + "io" + + actions_proto "www.velocidex.com/golang/velociraptor/actions/proto" + "www.velocidex.com/golang/velociraptor/constants" + crypto_proto "www.velocidex.com/golang/velociraptor/crypto/proto" + "www.velocidex.com/golang/velociraptor/responder" + "www.velocidex.com/golang/vfilter" +) + +// An uploader delivering files from client to server. +type VelociraptorUploader struct { + Responder *responder.Responder + Count int +} + +func (self *VelociraptorUploader) Upload( + ctx context.Context, + scope *vfilter.Scope, + filename string, + accessor string, + store_as_name string, + expected_size int64, + reader io.Reader) ( + *UploadResponse, error) { + result := &UploadResponse{ + Path: filename, + } + + if store_as_name == "" { + store_as_name = filename + } + + offset := uint64(0) + self.Count += 1 + + md5_sum := md5.New() + sha_sum := sha256.New() + + for { + // Ensure there is a fresh allocation for every + // iteration to prevent overwriting in flight buffers. 
+ buffer := make([]byte, 1024*1024) + read_bytes, err := reader.Read(buffer) + data := buffer[:read_bytes] + sha_sum.Write(data) + md5_sum.Write(data) + + packet := &actions_proto.FileBuffer{ + Pathspec: &actions_proto.PathSpec{ + Path: store_as_name, + Accessor: accessor, + }, + Offset: offset, + Size: uint64(expected_size), + Data: data, + Eof: err == io.EOF, + } + + select { + case <-ctx.Done(): + return nil, errors.New("Cancelled!") + + default: + // Send the packet to the server. + self.Responder.AddResponse(&crypto_proto.GrrMessage{ + RequestId: constants.TransferWellKnownFlowId, + FileBuffer: packet}) + } + + offset += uint64(read_bytes) + if err != nil && err != io.EOF { + return nil, err + } + + if read_bytes == 0 { + result.Size = offset + result.Sha256 = hex.EncodeToString(sha_sum.Sum(nil)) + result.Md5 = hex.EncodeToString(md5_sum.Sum(nil)) + return result, nil + } + } +} diff --git a/vql/networking/uploader.go b/uploads/file_based.go similarity index 57% rename from vql/networking/uploader.go rename to uploads/file_based.go index 1282e37f074..a59a1b7ef0c 100644 --- a/vql/networking/uploader.go +++ b/uploads/file_based.go @@ -15,7 +15,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package networking +package uploads import ( "context" @@ -29,35 +29,11 @@ import ( "regexp" "runtime" - actions_proto "www.velocidex.com/golang/velociraptor/actions/proto" - constants "www.velocidex.com/golang/velociraptor/constants" - crypto_proto "www.velocidex.com/golang/velociraptor/crypto/proto" "www.velocidex.com/golang/velociraptor/datastore" - "www.velocidex.com/golang/velociraptor/responder" "www.velocidex.com/golang/velociraptor/utils" "www.velocidex.com/golang/vfilter" ) -// Returned as the result of the query. 
-type UploadResponse struct { - Path string `json:"Path"` - Size uint64 `json:"Size"` - Error string `json:"Error,omitempty"` - Sha256 string `json:"sha256,omitempty"` - Md5 string `json:"md5,omitempty"` -} - -// Provide an uploader capable of uploading any reader object. -type Uploader interface { - Upload(ctx context.Context, - scope *vfilter.Scope, - filename string, - accessor string, - store_as_name string, - expected_size int64, - reader io.Reader) (*UploadResponse, error) -} - type FileBasedUploader struct { UploadDir string } @@ -152,76 +128,3 @@ func (self *FileBasedUploader) Upload( Md5: hex.EncodeToString(md5_sum.Sum(nil)), }, nil } - -type VelociraptorUploader struct { - Responder *responder.Responder - Count int -} - -func (self *VelociraptorUploader) Upload( - ctx context.Context, - scope *vfilter.Scope, - filename string, - accessor string, - store_as_name string, - expected_size int64, - reader io.Reader) ( - *UploadResponse, error) { - result := &UploadResponse{ - Path: filename, - } - - if store_as_name == "" { - store_as_name = filename - } - - offset := uint64(0) - self.Count += 1 - - md5_sum := md5.New() - sha_sum := sha256.New() - - for { - // Ensure there is a fresh allocation for every - // iteration to prevent overwriting in flight buffers. - buffer := make([]byte, 1024*1024) - read_bytes, err := reader.Read(buffer) - data := buffer[:read_bytes] - sha_sum.Write(data) - md5_sum.Write(data) - - packet := &actions_proto.FileBuffer{ - Pathspec: &actions_proto.PathSpec{ - Path: store_as_name, - Accessor: accessor, - }, - Offset: offset, - Size: uint64(expected_size), - Data: data, - Eof: err == io.EOF, - } - - select { - case <-ctx.Done(): - return nil, errors.New("Cancelled!") - - default: - // Send the packet to the server. 
- self.Responder.AddResponse(&crypto_proto.GrrMessage{ - RequestId: constants.TransferWellKnownFlowId, - FileBuffer: packet}) - } - - offset += uint64(read_bytes) - if err != nil && err != io.EOF { - return nil, err - } - - if read_bytes == 0 { - result.Size = offset - result.Sha256 = hex.EncodeToString(sha_sum.Sum(nil)) - result.Md5 = hex.EncodeToString(md5_sum.Sum(nil)) - return result, nil - } - } -} diff --git a/vql/acls.go b/vql/acls.go index f34c16127a2..ff378c61a51 100644 --- a/vql/acls.go +++ b/vql/acls.go @@ -99,6 +99,11 @@ func CheckFilesystemAccess(scope *vfilter.Scope, accessor string) error { case "data": return nil + // Direct filestore access only allowed for server + // admins. + case "filestore", "fs": + return CheckAccess(scope, acls.SERVER_ADMIN) + default: return CheckAccess(scope, acls.FILESYSTEM_READ) } diff --git a/vql/filesystem/filesystem.go b/vql/filesystem/filesystem.go index 90073b29e4f..9ae9cce3e47 100644 --- a/vql/filesystem/filesystem.go +++ b/vql/filesystem/filesystem.go @@ -24,6 +24,7 @@ import ( "github.com/Velocidex/ordereddict" "github.com/pkg/errors" "github.com/shirou/gopsutil/disk" + config_proto "www.velocidex.com/golang/velociraptor/config/proto" "www.velocidex.com/golang/velociraptor/glob" vql_subsystem "www.velocidex.com/golang/velociraptor/vql" "www.velocidex.com/golang/vfilter" @@ -46,6 +47,12 @@ func (self GlobPlugin) Call( go func() { defer close(output_chan) + any_config_obj, _ := scope.Resolve("server_config") + config_obj, ok := any_config_obj.(*config_proto.Config) + if !ok { + config_obj = &config_proto.Config{} + } + arg := &GlobPluginArgs{} err := vfilter.ExtractArgs(scope, args, arg) if err != nil { @@ -78,7 +85,7 @@ func (self GlobPlugin) Call( } file_chan := globber.ExpandWithContext( - ctx, root, accessor) + ctx, config_obj, root, accessor) for { select { case <-ctx.Done(): diff --git a/vql/filesystem/raw_registry.go b/vql/filesystem/raw_registry.go index 50e3c9728b0..9e3d56cb3b7 100644 --- 
a/vql/filesystem/raw_registry.go +++ b/vql/filesystem/raw_registry.go @@ -44,6 +44,7 @@ import ( "github.com/Velocidex/ordereddict" errors "github.com/pkg/errors" "www.velocidex.com/golang/regparser" + config_proto "www.velocidex.com/golang/velociraptor/config/proto" "www.velocidex.com/golang/velociraptor/glob" "www.velocidex.com/golang/velociraptor/utils" vql_subsystem "www.velocidex.com/golang/velociraptor/vql" @@ -406,6 +407,12 @@ func (self ReadKeyValues) Call( go func() { defer close(output_chan) + any_config_obj, _ := scope.Resolve("server_config") + config_obj, ok := any_config_obj.(*config_proto.Config) + if !ok { + config_obj = &config_proto.Config{} + } + arg := &ReadKeyValuesArgs{} err := vfilter.ExtractArgs(scope, args, arg) if err != nil { @@ -442,7 +449,7 @@ func (self ReadKeyValues) Call( } file_chan := globber.ExpandWithContext( - ctx, root, accessor) + ctx, config_obj, root, accessor) for { select { case <-ctx.Done(): diff --git a/vql/networking/upload.go b/vql/networking/upload.go index 2734ab3b006..977f2a7c243 100644 --- a/vql/networking/upload.go +++ b/vql/networking/upload.go @@ -22,6 +22,7 @@ import ( "github.com/Velocidex/ordereddict" "www.velocidex.com/golang/velociraptor/glob" + "www.velocidex.com/golang/velociraptor/uploads" vql_subsystem "www.velocidex.com/golang/velociraptor/vql" "www.velocidex.com/golang/vfilter" ) @@ -45,7 +46,7 @@ func (self *UploadFunction) Call(ctx context.Context, scope.Log("upload: Uploader not configured.") return vfilter.Null{} } - uploader, ok := uploader_obj.(Uploader) + uploader, ok := uploader_obj.(uploads.Uploader) if ok { arg := &UploadFunctionArgs{} err := vfilter.ExtractArgs(scope, args, arg) @@ -67,7 +68,7 @@ func (self *UploadFunction) Call(ctx context.Context, accessor, err := glob.GetAccessor(arg.Accessor, ctx) if err != nil { scope.Log("upload: %v", err) - return &UploadResponse{ + return &uploads.UploadResponse{ Error: err.Error(), } } @@ -76,7 +77,7 @@ func (self *UploadFunction) Call(ctx 
context.Context, if err != nil { scope.Log("upload: Unable to open %s: %s", arg.File, err.Error()) - return &UploadResponse{ + return &uploads.UploadResponse{ Error: err.Error(), } } @@ -94,7 +95,7 @@ func (self *UploadFunction) Call(ctx context.Context, stat.Size(), // Expected size. file) if err != nil { - return &UploadResponse{ + return &uploads.UploadResponse{ Error: err.Error(), } } @@ -138,7 +139,7 @@ func (self *UploadPlugin) Call( } uploader_obj, _ := scope.Resolve("$uploader") - uploader, ok := uploader_obj.(Uploader) + uploader, ok := uploader_obj.(uploads.Uploader) if !ok { scope.Log("upload: Uploader not configured.") @@ -194,7 +195,7 @@ func (self UploadPlugin) Info(scope *vfilter.Scope, type_map *vfilter.TypeMap) * return &vfilter.PluginInfo{ Name: "upload", Doc: "Upload files to the server.", - RowType: type_map.AddType(scope, &UploadResponse{}), + RowType: type_map.AddType(scope, &uploads.UploadResponse{}), ArgType: type_map.AddType(scope, &UploadPluginArgs{}), } } diff --git a/vql/server/flows.go b/vql/server/flows.go index 134c30a1869..24b0db67e06 100644 --- a/vql/server/flows.go +++ b/vql/server/flows.go @@ -226,12 +226,18 @@ func (self EnumerateFlowPlugin) Call( artifacts.ModeNameToMode(artifact.Type)) globber := make(glob.Globber) - accessor := file_store.GetFileStoreFileSystemAccessor(config_obj) + accessor, err := file_store.GetFileStoreFileSystemAccessor(config_obj) + if err != nil { + scope.Log("enumerate_flow: %v", err) + return + } + globber.Add(csv_path, accessor.PathSplit) // Expanding the glob is not sorted but we really need // to go in order of dates. 
- for hit := range globber.ExpandWithContext(ctx, "", accessor) { + for hit := range globber.ExpandWithContext( + ctx, config_obj, "", accessor) { full_path := hit.FullPath() emit("Result", full_path) } diff --git a/vql/server/monitoring.go b/vql/server/monitoring.go index 2c2af0639f0..c83e7bfcab1 100644 --- a/vql/server/monitoring.go +++ b/vql/server/monitoring.go @@ -21,7 +21,6 @@ package server import ( "context" - "fmt" "io" "time" @@ -102,10 +101,16 @@ func (self MonitoringPlugin) Call( source, mode) globber := make(glob.Globber) - accessor := file_store.GetFileStoreFileSystemAccessor(config_obj) + accessor, err := file_store.GetFileStoreFileSystemAccessor(config_obj) + if err != nil { + scope.Log("monitoring: %v", err) + return + } + globber.Add(log_path, accessor.PathSplit) - for hit := range globber.ExpandWithContext(ctx, "", accessor) { + for hit := range globber.ExpandWithContext( + ctx, config_obj, "", accessor) { err := self.ScanLog(config_obj, scope, output_chan, hit.FullPath()) @@ -243,7 +248,11 @@ func (self WatchMonitoringPlugin) Call( } globber := make(glob.Globber) - accessor := file_store.GetFileStoreFileSystemAccessor(config_obj) + accessor, err := file_store.GetFileStoreFileSystemAccessor(config_obj) + if err != nil { + scope.Log("watch_monitoring: %v", err) + return + } // dir_state contains the initial state of the log // files when we first start watching. If the file @@ -258,18 +267,17 @@ func (self WatchMonitoringPlugin) Call( source, mode) globber.Add(log_path, accessor.PathSplit) - fmt.Printf("Globbing for %v (%T)\n", log_path, accessor) - // Capture the initial state of the files. We will // only monitor events after this point. - for item := range globber.ExpandWithContext(ctx, "", accessor) { - fmt.Printf("Glob for %v\n", item.FullPath()) + for item := range globber.ExpandWithContext( + ctx, config_obj, "", accessor) { dir_state[item.FullPath()] = info{item.ModTime(), item.Size()} } // Spin forever here and emit new files or events. 
for { - for item := range globber.ExpandWithContext(ctx, "", accessor) { + for item := range globber.ExpandWithContext( + ctx, config_obj, "", accessor) { self.ScanLog(ctx, config_obj, scope, dir_state, output_chan, item, arg.ClientId, arg.Artifact) diff --git a/vql/server/results.go b/vql/server/results.go index 80fb149a835..de45a164976 100644 --- a/vql/server/results.go +++ b/vql/server/results.go @@ -206,13 +206,19 @@ func (self SourcePlugin) Call( } globber := make(glob.Globber) - accessor := file_store.GetFileStoreFileSystemAccessor(config_obj) + accessor, err := file_store.GetFileStoreFileSystemAccessor(config_obj) + if err != nil { + scope.Log("source: %v", err) + return + } + globber.Add(csv_path, accessor.PathSplit) // Expanding the glob is not sorted but we really need // to go in order of dates. hits := []string{} - for hit := range globber.ExpandWithContext(ctx, "", accessor) { + for hit := range globber.ExpandWithContext( + ctx, config_obj, "", accessor) { hits = append(hits, hit.FullPath()) } sort.Strings(hits) diff --git a/vql/tools/gcs_upload.go b/vql/tools/gcs_upload.go index 9d52e30034b..1bf9aeb2d42 100644 --- a/vql/tools/gcs_upload.go +++ b/vql/tools/gcs_upload.go @@ -14,9 +14,9 @@ import ( "golang.org/x/net/context" "google.golang.org/api/option" "www.velocidex.com/golang/velociraptor/glob" + "www.velocidex.com/golang/velociraptor/uploads" "www.velocidex.com/golang/velociraptor/utils" vql_subsystem "www.velocidex.com/golang/velociraptor/vql" - "www.velocidex.com/golang/velociraptor/vql/networking" "www.velocidex.com/golang/vfilter" ) @@ -89,7 +89,7 @@ func upload_gcs(ctx context.Context, scope *vfilter.Scope, reader io.Reader, projectID, bucket, name string, credentials string) ( - *networking.UploadResponse, error) { + *uploads.UploadResponse, error) { // Cache the bucket handle between invocations. 
var bucket_handle *storage.BucketHandle @@ -141,12 +141,12 @@ func upload_gcs(ctx context.Context, scope *vfilter.Scope, n, err := utils.Copy(ctx, utils.NewTee( writer, sha_sum, md5_sum, log_writer), reader) if err != nil { - return &networking.UploadResponse{ + return &uploads.UploadResponse{ Error: err.Error(), }, err } - return &networking.UploadResponse{ + return &uploads.UploadResponse{ Path: name, Size: uint64(n), Sha256: hex.EncodeToString(sha_sum.Sum(nil)), diff --git a/vql/tools/s3_upload.go b/vql/tools/s3_upload.go index c5dc3aa8c6e..4f2de2d1c1c 100644 --- a/vql/tools/s3_upload.go +++ b/vql/tools/s3_upload.go @@ -12,8 +12,8 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3manager" "golang.org/x/net/context" "www.velocidex.com/golang/velociraptor/glob" + "www.velocidex.com/golang/velociraptor/uploads" vql_subsystem "www.velocidex.com/golang/velociraptor/vql" - "www.velocidex.com/golang/velociraptor/vql/networking" "www.velocidex.com/golang/vfilter" ) @@ -94,7 +94,7 @@ func upload_S3(ctx context.Context, scope *vfilter.Scope, reader io.Reader, bucket, name string, credentialsKey string, credentialsSecret string, region string) ( - *networking.UploadResponse, error) { + *uploads.UploadResponse, error) { scope.Log("upload_S3: Uploading %v to %v", name, bucket) @@ -102,7 +102,7 @@ func upload_S3(ctx context.Context, scope *vfilter.Scope, creds := credentials.NewStaticCredentials(credentialsKey, credentialsSecret, token) _, err := creds.Get() if err != nil { - return &networking.UploadResponse{ + return &uploads.UploadResponse{ Error: err.Error(), }, err } @@ -118,12 +118,12 @@ func upload_S3(ctx context.Context, scope *vfilter.Scope, Body: reader, }) if err != nil { - return &networking.UploadResponse{ + return &uploads.UploadResponse{ Error: err.Error(), }, err } - return &networking.UploadResponse{ + return &uploads.UploadResponse{ Path: result.Location, }, nil }