Sync to Master #3824

Merged
33 commits merged on Oct 13, 2024
Commits
4f805d3  Add Execution field (#3774)  mgreen27, Sep 24, 2024
41ea4e8  add base64 extraction improvement (#3776)  mgreen27, Sep 26, 2024
dc5e723  Update Lnk.yaml (#3777)  mgreen27, Sep 26, 2024
6f82e20  Various bug fixes (#3780)  scudette, Sep 29, 2024
566cd84  Truncate long lines in Artifact Description GUI (#3782)  scudette, Sep 30, 2024
c51b6fd  Bugfix: Marshaling linux paths should not escape quotes (#3784)  scudette, Sep 30, 2024
922a754  Added column resizers to tables. (#3786)  scudette, Oct 1, 2024
df0ad26  Remove obsolete count parameter from Server.Information.Clients (#3788)  predictiple, Oct 1, 2024
e94d6e9  Update LNK typos (#3789)  mgreen27, Oct 1, 2024
1c1f9eb  Columns can be reordered by drag and drop (#3790)  scudette, Oct 1, 2024
0bae4d8  Usability enhancements for table GUI (#3792)  scudette, Oct 2, 2024
6f3389b  Bugfix: Parameter type artifactset should serialize to CSV (#3793)  scudette, Oct 2, 2024
447c11a  Redesigned date/time selector. (#3795)  scudette, Oct 3, 2024
355f50b  Theme tweaks (#3798)  predictiple, Oct 3, 2024
85ba6bf  Update calendar to be more timezone aware. (#3799)  scudette, Oct 4, 2024
a569502  Added timeboxing for the Journald parser (#3800)  scudette, Oct 4, 2024
f31ca25  Bugfix: Dashboard did not pass the Sample parameter correctly. (#3802)  scudette, Oct 4, 2024
f74ad6a  Add a Journal notebook suggestion: Simple syslog-like view (#3803)  misje, Oct 4, 2024
39efccf  Update timeline to display times in user timezone. (#3804)  scudette, Oct 5, 2024
965dba2  Table context menu allows annotation to timeline (#3805)  scudette, Oct 7, 2024
05652f5  Sync latest KapeFiles Targets (#3806)  scudette, Oct 8, 2024
108d574  Fix timeline markers timezone correction. (#3807)  scudette, Oct 9, 2024
daea247  Add Zeroed Header update (#3808)  mgreen27, Oct 9, 2024
25e8e39  Bugfixes: Force timeline message to be a string. (#3811)  scudette, Oct 9, 2024
0a1d107  Bugfix: Updated table API to use JSON (#3812)  scudette, Oct 10, 2024
96107f5  adding IP analysis to VT enrichment artifact (#3813)  shortstack, Oct 10, 2024
6dd8cda  Bugfix: Hunt label UI css improvements. (#3815)  scudette, Oct 10, 2024
fe6ab3d  Bugfix: Fix broken shell viewer and metadata viewer. (#3816)  scudette, Oct 10, 2024
4321578  Bugfix: Fixed broken links in dashboard (#3817)  scudette, Oct 11, 2024
455060b  Bugfix: Render bools and numbers as plain text in tables. (#3818)  scudette, Oct 12, 2024
a8bf2d8  Backups should also include custom artifacts. (#3819)  scudette, Oct 12, 2024
e456841  Bugfix: Force GC run instead of return memory to the OS (#3820)  scudette, Oct 13, 2024
fd245c9  Bugfixes: Add to timeline dialog broke (#3823)  scudette, Oct 13, 2024
2 changes: 1 addition & 1 deletion Makefile
@@ -92,7 +92,7 @@ lint:
golangci-lint run

KapeFilesSync:
python3 scripts/kape_files.py -t win ~/projects/KapeFiles/ > artifacts/definitions/Windows/KapeFiles/Targets.yaml
python3 scripts/kape_files.py -t win --state_file_path scripts/templates/kape_files_state.json ~/projects/KapeFiles/ > artifacts/definitions/Windows/KapeFiles/Targets.yaml

SQLECmdSync:
python3 scripts/sqlecmd_convert.py ~/projects/SQLECmd/ ~/projects/KapeFiles/ artifacts/definitions/Generic/Collectors/SQLECmd.yaml
16 changes: 13 additions & 3 deletions accessors/manipulators.go
@@ -25,9 +25,9 @@ var (

// This is a generic Path manipulator that implements the escaping
// standard as used by Velociraptor:
// 1. Path separators are / but will be able to use \\ to parse.
// 2. Each component is optionally quoted if it contains special
// characters (like path separators).
// 1. Path separators are / but will be able to use \\ to parse.
// 2. Each component is optionally quoted if it contains special
// characters (like path separators).
type GenericPathManipulator struct {
Sep string
}
@@ -124,6 +124,16 @@ func (self LinuxPathManipulator) PathParse(path string, result *OSPath) error {
return nil
}

func (self LinuxPathManipulator) PathJoin(path *OSPath) string {
osPathSerializations.Inc()

result := self.AsPathSpec(path)
if result.GetDelegateAccessor() == "" && result.GetDelegatePath() == "" {
return result.Path
}
return result.String()
}

func (self LinuxPathManipulator) AsPathSpec(path *OSPath) *PathSpec {
result := path.pathspec
if result == nil {
32 changes: 32 additions & 0 deletions accessors/manipulators_test.go
@@ -12,11 +12,43 @@ type testcase struct {
expected_path string
}

var generic_testcases = []testcase{
// Generic paths try to take a good guess of the path type:
// 1. Use / or \ as path separator
// 2. Quotes represent unbroken paths.
{"/bin/file\\1.txt", []string{"bin", "file", "1.txt"}, "/bin/file/1.txt"},

// Quotes in the filename are escaped by doubling up and enclosing
// the component with a single quote.
{"/bin/file\"1\".txt", []string{"bin", "file\"1\".txt"},
`/bin/"file""1"".txt"`},

{`/bin/"file""1"".txt"`, []string{"bin", "file\"1\".txt"},
`/bin/"file""1"".txt"`},

// Enclosing a path in quotes treats it as a single literal
// component.
{"/bin/\"file\\1.txt\"", []string{"bin", "file\\1.txt"}, "/bin/\"file\\1.txt\""},
}

func TestGenericManipulators(t *testing.T) {
for _, testcase := range generic_testcases {
path, err := NewGenericOSPath(testcase.serialized_path)
assert.NoError(t, err)
assert.Equal(t, testcase.components, path.Components)
assert.Equal(t, testcase.expected_path, path.String())
}
}

var linux_testcases = []testcase{
{"/bin/ls", []string{"bin", "ls"}, "/bin/ls"},
{"bin////ls", []string{"bin", "ls"}, "/bin/ls"},
{"/bin/ls////", []string{"bin", "ls"}, "/bin/ls"},

// Files with non-path backslash characters should be parsed as
// one filename. They should also be serialized as a single file.
{"/bin/file\\1.txt", []string{"bin", "file\\1.txt"}, "/bin/file\\1.txt"},

// Ignore and dont support directory traversal at all
{"/bin/../../../.././../../ls", []string{"bin", "ls"}, "/bin/ls"},

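The generic test cases above encode the component-quoting rules: an embedded quote is preserved as part of the component when parsing, and is doubled inside a quoted component when the path is serialized again. A minimal sketch of that round trip, assuming the exported accessors API shown in the diff (the import path is inferred from the repository layout):

```go
package main

import (
	"fmt"

	// Import path assumed from the repository layout.
	"www.velocidex.com/golang/velociraptor/accessors"
)

func main() {
	// A quote inside a component survives parsing unchanged...
	p, err := accessors.NewGenericOSPath(`/bin/file"1".txt`)
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Components) // [bin file"1".txt]

	// ...and is doubled inside a quoted component on serialization.
	fmt.Println(p.String()) // /bin/"file""1"".txt"
}
```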
65 changes: 44 additions & 21 deletions accessors/vfs/vfs.go
@@ -2,7 +2,6 @@ package vfs

import (
"context"
"encoding/json"
"errors"

"github.com/Velocidex/ordereddict"
@@ -11,14 +10,17 @@ import (
api_proto "www.velocidex.com/golang/velociraptor/api/proto"
config_proto "www.velocidex.com/golang/velociraptor/config/proto"
flows_proto "www.velocidex.com/golang/velociraptor/flows/proto"
"www.velocidex.com/golang/velociraptor/json"
"www.velocidex.com/golang/velociraptor/services"
"www.velocidex.com/golang/velociraptor/utils"
vql_subsystem "www.velocidex.com/golang/velociraptor/vql"
"www.velocidex.com/golang/vfilter"
)

var (
ErrNotFound = errors.New("file not found")
ErrNotAvailable = errors.New("File content not available")
ErrInvalidRow = errors.New("Stored row is invalid")
)

type VFSFileSystemAccessor struct {
@@ -97,14 +99,18 @@ func (self VFSFileSystemAccessor) LstatWithOSPath(filename *accessors.OSPath) (
if err != nil {
return nil, err
}

// Find the row that matches this filename
for _, r := range res.Rows {
if len(r.Cell) < 12 {
var row []interface{}
_ = json.Unmarshal([]byte(r.Json), &row)
if len(row) < 12 {
continue
}

if r.Cell[5] == filename.Basename() {
return rowCellToFSInfo(r.Cell)
name, ok := row[5].(string)
if ok && name == filename.Basename() {
return rowCellToFSInfo(row)
}
}

@@ -147,11 +153,13 @@ func (self VFSFileSystemAccessor) ReadDirWithOSPath(

result := []accessors.FileInfo{}
for _, r := range res.Rows {
if len(r.Cell) < 12 {
var row []interface{}
_ = json.Unmarshal([]byte(r.Json), &row)
if len(row) < 12 {
continue
}

fs_info, err := rowCellToFSInfo(r.Cell)
fs_info, err := rowCellToFSInfo(row)
if err != nil {
continue
}
@@ -191,14 +199,21 @@ func (self VFSFileSystemAccessor) OpenWithOSPath(filename *accessors.OSPath) (

// Find the row that matches this filename
for _, r := range res.Rows {
if len(r.Cell) < 12 {
var row []interface{}
_ = json.Unmarshal([]byte(r.Json), &row)
if len(row) < 12 {
continue
}

name, ok := row[5].(string)
if !ok {
continue
}

if r.Cell[5] == filename.Basename() {
if name == filename.Basename() {
// Check if it has a download link
record := &flows_proto.VFSDownloadInfo{}
err = json.Unmarshal([]byte(r.Cell[0]), record)
err = utils.ParseIntoProtobuf(row[0], record)
if err != nil || record.Name == "" {
return nil, ErrNotAvailable
}
@@ -211,23 +226,31 @@ func (self VFSFileSystemAccessor) OpenWithOSPath(filename *accessors.OSPath) (
return nil, ErrNotFound
}

func rowCellToFSInfo(cell []string) (accessors.FileInfo, error) {
components := []string{}
err := json.Unmarshal([]byte(cell[2]), &components)
if err != nil {
return nil, err
func rowCellToFSInfo(cell []interface{}) (accessors.FileInfo, error) {
components := utils.ConvertToStringSlice(cell[2])
if len(components) == 0 {
return nil, ErrInvalidRow
}

size := int64(0)
_ = json.Unmarshal([]byte(cell[6]), &size)
size, ok := utils.ToInt64(cell[6])
if !ok {
return nil, ErrInvalidRow
}

is_dir := false
if len(cell[7]) > 1 && cell[7][0] == 'd' {
is_dir = true
mode, ok := cell[7].(string)
if !ok {
return nil, ErrInvalidRow
}

is_dir := len(mode) > 1 && mode[0] == 'd'

// The Accessor + components is the path of the item
path := accessors.MustNewGenericOSPath(cell[3]).Append(components...)
ospath, ok := cell[3].(string)
if !ok {
return nil, ErrInvalidRow
}

path := accessors.MustNewGenericOSPath(ospath).Append(components...)
fs_info := &accessors.VirtualFileInfo{
Path: path,
IsDir_: is_dir,
@@ -237,7 +260,7 @@ func rowCellToFSInfo(cell []string) (accessors.FileInfo, error) {

// The download pointer allows us to fetch the file itself.
record := &flows_proto.VFSDownloadInfo{}
err = json.Unmarshal([]byte(cell[0]), record)
err := utils.ParseIntoProtobuf(cell[0], record)
if err == nil {
fs_info.Data_.Set("DownloadInfo", record)
}
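The VFS accessor above now decodes each table row from the Json field rather than the pre-rendered string Cell slice, so cell values keep their native JSON types. A minimal sketch of that decoding pattern with a hypothetical row payload; the real code wraps the type handling in helpers such as utils.ConvertToStringSlice, utils.ToInt64 and utils.ParseIntoProtobuf:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical serialized row as the table API now returns it:
	// a JSON array whose cells keep their native types.
	raw := `[{"name":"ls"}, null, ["bin","ls"], "file", null, "ls", 1234, "-rwxr-xr-x", 0, 0, 0, 0]`

	var row []interface{}
	if err := json.Unmarshal([]byte(raw), &row); err != nil {
		fmt.Println("invalid row:", err)
		return
	}
	if len(row) < 12 {
		fmt.Println("short row")
		return
	}

	// Cells are type asserted instead of being re-parsed from strings.
	name, _ := row[5].(string)  // basename column
	size, _ := row[6].(float64) // encoding/json decodes numbers as float64
	mode, _ := row[7].(string)  // mode string, e.g. "drwxr-xr-x"

	isDir := len(mode) > 1 && mode[0] == 'd'
	fmt.Println(name, int64(size), isDir) // ls 1234 false
}
```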
12 changes: 9 additions & 3 deletions api/api.go
@@ -38,11 +38,11 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/emptypb"
"www.velocidex.com/golang/velociraptor/acls"
actions_proto "www.velocidex.com/golang/velociraptor/actions/proto"
"www.velocidex.com/golang/velociraptor/api/authenticators"
"www.velocidex.com/golang/velociraptor/api/proto"
api_proto "www.velocidex.com/golang/velociraptor/api/proto"
"www.velocidex.com/golang/velociraptor/api/tables"
artifacts_proto "www.velocidex.com/golang/velociraptor/artifacts/proto"
@@ -62,7 +62,7 @@ import (
)

type ApiServer struct {
proto.UnimplementedAPIServer
api_proto.UnimplementedAPIServer
server_obj *server.Server
ca_pool *x509.CertPool
wg *sync.WaitGroup
@@ -713,8 +713,14 @@ func (self *ApiServer) GetArtifacts(

for _, name := range in.Names {
artifact, pres := repository.Get(ctx, org_config_obj, name)
artifact_clone := proto.Clone(artifact).(*artifacts_proto.Artifact)
for _, s := range artifact_clone.Sources {
s.Queries = nil
}
artifact_clone.Raw = ""

if pres {
result.Items = append(result.Items, artifact)
result.Items = append(result.Items, artifact_clone)
}
}
return result, nil
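GetArtifacts now clones each artifact before clearing the source queries and raw YAML, so the redaction never touches the shared repository copy. A self-contained illustration of that clone-before-redact pattern, using a well-known protobuf type rather than the project's Artifact message:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Stand-in for a cached artifact definition shared between requests.
	original, err := structpb.NewStruct(map[string]interface{}{
		"name":    "Custom.Example.Artifact", // hypothetical artifact name
		"queries": "SELECT * FROM info()",
	})
	if err != nil {
		panic(err)
	}

	// Clone first, then redact the copy; the shared original is unchanged.
	clone := proto.Clone(original).(*structpb.Struct)
	delete(clone.Fields, "queries")

	fmt.Println(len(original.Fields), len(clone.Fields)) // 2 1
}
```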
2 changes: 1 addition & 1 deletion api/authenticators/common.go
@@ -46,7 +46,7 @@ func getSignedJWTTokenCookie(
// replay sessioon cookies past expiry.
expiry := time.Now().Add(time.Minute * time.Duration(expiry_min))

// Enfore the JWT to expire
// Enforce the JWT to expire
claims.Expires = float64(expiry.Unix())

// Make a JWT and sign it.
29 changes: 18 additions & 11 deletions api/flows.go
@@ -79,20 +79,27 @@ func (self *ApiServer) GetClientFlows(
if flow.Request == nil {
continue
}
row_data := []string{
row_data := []interface{}{
flow.State.String(),
flow.SessionId,
json.AnyToString(flow.Request.Artifacts, vjson.DefaultEncOpts()),
json.AnyToString(flow.CreateTime, vjson.DefaultEncOpts()),
json.AnyToString(flow.ActiveTime, vjson.DefaultEncOpts()),
json.AnyToString(flow.Request.Creator, vjson.DefaultEncOpts()),
json.AnyToString(flow.TotalUploadedBytes, vjson.DefaultEncOpts()),
json.AnyToString(flow.TotalCollectedRows, vjson.DefaultEncOpts()),
json.MustMarshalProtobufString(flow, vjson.DefaultEncOpts()),
json.AnyToString(flow.Request.Urgent, vjson.DefaultEncOpts()),
json.AnyToString(flow.ArtifactsWithResults, vjson.DefaultEncOpts()),
flow.Request.Artifacts,
flow.CreateTime,
flow.ActiveTime,
flow.Request.Creator,
flow.TotalUploadedBytes,
flow.TotalCollectedRows,
json.ConvertProtoToOrderedDict(flow),
flow.Request.Urgent,
flow.ArtifactsWithResults,
}
result.Rows = append(result.Rows, &api_proto.Row{Cell: row_data})
opts := vjson.DefaultEncOpts()
serialized, err := json.MarshalWithOptions(row_data, opts)
if err != nil {
continue
}
result.Rows = append(result.Rows, &api_proto.Row{
Json: string(serialized),
})
}

return result, nil
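On the producer side, GetClientFlows (and GetHuntFlows below) now builds each row from native values and serializes it once into the Row's Json field. A rough sketch of the same idea with the standard encoding/json package and made-up values; the project itself uses its own json wrapper with vjson.DefaultEncOpts:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Mixed-type cells are kept as native values and serialized once,
	// instead of being rendered to strings cell by cell.
	rowData := []interface{}{
		"RUNNING",                       // flow state
		"F.CS0ABC123",                   // hypothetical session id
		[]string{"Generic.Client.Info"}, // requested artifacts
		time.Now().Unix(),               // create time
		uint64(4096),                    // total uploaded bytes
		false,                           // urgent flag
	}

	serialized, err := json.Marshal(rowData)
	if err != nil {
		return // the real code skips rows that fail to serialize
	}

	// The API row now carries a single Json field, e.g.
	// &api_proto.Row{Json: string(serialized)}.
	fmt.Println(string(serialized))
}
```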
44 changes: 29 additions & 15 deletions api/hunts.go
@@ -74,18 +74,26 @@ func (self *ApiServer) GetHuntFlows(
continue
}

row_data := []string{
row_data := []interface{}{
flow.Context.ClientId,
services.GetHostname(ctx, org_config_obj, flow.Context.ClientId),
flow.Context.SessionId,
json.AnyToString(flow.Context.StartTime/1000, vjson.DefaultEncOpts()),
flow.Context.StartTime / 1000,
flow.Context.State.String(),
json.AnyToString(flow.Context.ExecutionDuration/1000000000,
vjson.DefaultEncOpts()),
json.AnyToString(flow.Context.TotalUploadedBytes, vjson.DefaultEncOpts()),
json.AnyToString(flow.Context.TotalCollectedRows, vjson.DefaultEncOpts())}
flow.Context.ExecutionDuration / 1000000000,
flow.Context.TotalUploadedBytes,
flow.Context.TotalCollectedRows,
}

opts := vjson.DefaultEncOpts()
serialized, err := json.MarshalWithOptions(row_data, opts)
if err != nil {
continue
}

result.Rows = append(result.Rows, &api_proto.Row{Cell: row_data})
result.Rows = append(result.Rows, &api_proto.Row{
Json: string(serialized),
})

if uint64(len(result.Rows)) > in.Rows {
break
@@ -142,19 +150,25 @@ func (self *ApiServer) GetHuntTable(
total_clients_scheduled = hunt.Stats.TotalClientsScheduled
}

row_data := []string{
row_data := []interface{}{
fmt.Sprintf("%v", hunt.State),
json.AnyToString(hunt.Tags, vjson.DefaultEncOpts()),
hunt.Tags,
hunt.HuntId,
hunt.HuntDescription,
json.AnyToString(hunt.CreateTime, vjson.DefaultEncOpts()),
json.AnyToString(hunt.StartTime, vjson.DefaultEncOpts()),
json.AnyToString(hunt.Expires, vjson.DefaultEncOpts()),
fmt.Sprintf("%v", total_clients_scheduled),
hunt.CreateTime,
hunt.StartTime,
hunt.Expires,
total_clients_scheduled,
hunt.Creator,
}

result.Rows = append(result.Rows, &api_proto.Row{Cell: row_data})
opts := vjson.DefaultEncOpts()
serialized, err := json.MarshalWithOptions(row_data, opts)
if err != nil {
continue
}
result.Rows = append(result.Rows, &api_proto.Row{
Json: string(serialized),
})

if uint64(len(result.Rows)) > in.Rows {
break