From eddf16e183a67e06a16078e94e5295d2e7d7d7a9 Mon Sep 17 00:00:00 2001
From: Dan Lapid
Date: Tue, 6 Sep 2022 16:21:47 +0300
Subject: [PATCH] More tests

---
 pkg/fecdecoder/fecdecoder_test.go | 87 ++++++++++++++++++++++++++++
 pkg/fecencoder/fecencoder.go      | 20 +++---
 pkg/fecencoder/fecencoder_test.go | 61 ++++++++++++++++++++
 pkg/filecloser/filecloser_test.go |  2 +-
 pkg/filereader/filereader_test.go | 25 +-------
 pkg/sender/sender.go              |  2 +-
 pkg/structs/structs.go            |  4 +-
 pkg/udpsender/udpsender_test.go   | 78 +++++++++++++++++++++++++
 pkg/watcher/watcher.go            |  2 +-
 pkg/watcher/watcher_test.go       | 94 +++++++++++++++++++++++++++++++
 10 files changed, 335 insertions(+), 40 deletions(-)
 create mode 100644 pkg/fecdecoder/fecdecoder_test.go
 create mode 100644 pkg/fecencoder/fecencoder_test.go
 create mode 100644 pkg/udpsender/udpsender_test.go
 create mode 100644 pkg/watcher/watcher_test.go

diff --git a/pkg/fecdecoder/fecdecoder_test.go b/pkg/fecdecoder/fecdecoder_test.go
new file mode 100644
index 0000000..bbb9266
--- /dev/null
+++ b/pkg/fecdecoder/fecdecoder_test.go
@@ -0,0 +1,87 @@
+package fecdecoder
+
+import (
+	"bytes"
+	"context"
+	"oneway-filesync/pkg/structs"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/klauspost/reedsolomon"
+	"github.com/sirupsen/logrus"
+)
+
+func createChunks(t *testing.T, required int, total int) []*structs.Chunk {
+	fec, err := reedsolomon.New(required, total-required)
+	if err != nil {
+		t.Fatal(err)
+	}
+	shares, err := fec.Split(make([]byte, 400))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Encode the parity set
+	err = fec.Encode(shares)
+	if err != nil {
+		t.Fatal(err)
+	}
+	chunks := make([]*structs.Chunk, total)
+	for i, sharedata := range shares {
+		chunks[i] = &structs.Chunk{
+			ShareIndex: uint32(i),
+			Data:       sharedata,
+		}
+	}
+	return chunks
+
+}
+
+func Test_worker(t *testing.T) {
+	type args struct {
+		required int
+		total    int
+		input    []*structs.Chunk
+	}
+	tests := []struct {
+		name        string
+		args        args
+		wantErr     bool
+		expectedErr string
+	}{
+		{"test-works", args{2, 4, createChunks(t, 2, 4)}, false, ""},
+		{"test-too-few-shards", args{4, 8, createChunks(t, 4, 8)[:3]}, true, "Error FEC decoding shares: too few shards given"},
+		{"test-invalid-fec1", args{2, 1, make([]*structs.Chunk, 4)}, true, "Error creating fec object: cannot create Encoder with less than one data shard or less than zero parity shards"},
+		{"test-invalid-fec2", args{0, 1, make([]*structs.Chunk, 4)}, true, "Error creating fec object: cannot create Encoder with less than one data shard or less than zero parity shards"},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var memLog bytes.Buffer
+			logrus.SetOutput(&memLog)
+
+			input := make(chan []*structs.Chunk, 5)
+			output := make(chan *structs.Chunk, 5)
+
+			input <- tt.args.input
+
+			conf := fecDecoderConfig{tt.args.required, tt.args.total, input, output}
+			ctx, cancel := context.WithCancel(context.Background())
+			go func() {
+				time.Sleep(2 * time.Second)
+				cancel()
+			}()
+			// run the worker; it exits once the delayed cancel fires
+			worker(ctx, &conf)
+
+			if tt.wantErr {
+				if !strings.Contains(memLog.String(), tt.expectedErr) {
+					t.Fatalf("Expected not in log, '%v' not in '%v'", tt.expectedErr, memLog.String())
+				}
+			} else {
+				<-output
+			}
+
+		})
+	}
+}
diff --git a/pkg/fecencoder/fecencoder.go b/pkg/fecencoder/fecencoder.go
index 8609afb..27512fd 100644
--- a/pkg/fecencoder/fecencoder.go
+++ b/pkg/fecencoder/fecencoder.go
@@ -10,11 +10,10 @@ import (
 )
 
 type fecEncoderConfig struct {
-	chunksize int
-	required  int
-	total     int
-	input     chan *structs.Chunk
-	output    chan *structs.Chunk
+	required int
+	total    int
+	input    chan *structs.Chunk
+	output   chan *structs.Chunk
 }
 
 // FEC routine:
@@ -71,13 +70,12 @@ func worker(ctx context.Context, conf *fecEncoderConfig) {
 	}
 }
 
-func CreateFecEncoder(ctx context.Context, chunksize int, required int, total int, input chan *structs.Chunk, output chan *structs.Chunk, workercount int) {
+func CreateFecEncoder(ctx context.Context, required int, total int, input chan *structs.Chunk, output chan *structs.Chunk, workercount int) {
 	conf := fecEncoderConfig{
-		chunksize: chunksize,
-		required:  required,
-		total:     total,
-		input:     input,
-		output:    output,
+		required: required,
+		total:    total,
+		input:    input,
+		output:   output,
 	}
 	for i := 0; i < workercount; i++ {
 		go worker(ctx, &conf)
diff --git a/pkg/fecencoder/fecencoder_test.go b/pkg/fecencoder/fecencoder_test.go
new file mode 100644
index 0000000..d808168
--- /dev/null
+++ b/pkg/fecencoder/fecencoder_test.go
@@ -0,0 +1,61 @@
+package fecencoder
+
+import (
+	"bytes"
+	"context"
+	"oneway-filesync/pkg/structs"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/sirupsen/logrus"
+)
+
+func Test_worker(t *testing.T) {
+	type args struct {
+		required int
+		total    int
+		input    *structs.Chunk
+	}
+	tests := []struct {
+		name        string
+		args        args
+		wantErr     bool
+		expectedErr string
+	}{
+		{"test-works", args{2, 4, &structs.Chunk{Data: make([]byte, 400)}}, false, ""},
+		{"test-shortdata1", args{2, 4, &structs.Chunk{Data: make([]byte, 0)}}, true, "Error splitting chunk: not enough data to fill the number of requested shards"},
+		{"test-invalid-fec1", args{2, 1, &structs.Chunk{}}, true, "Error creating fec object: cannot create Encoder with less than one data shard or less than zero parity shards"},
+		{"test-invalid-fec2", args{0, 1, &structs.Chunk{}}, true, "Error creating fec object: cannot create Encoder with less than one data shard or less than zero parity shards"},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var memLog bytes.Buffer
+			logrus.SetOutput(&memLog)
+
+			input := make(chan *structs.Chunk, 5)
+			output := make(chan *structs.Chunk, 5)
+
+			input <- tt.args.input
+
+			conf := fecEncoderConfig{tt.args.required, tt.args.total, input, output}
+			ctx, cancel := context.WithCancel(context.Background())
+			go func() {
+				time.Sleep(2 * time.Second)
+				cancel()
+			}()
+			// run the worker; it exits once the delayed cancel fires
+			worker(ctx, &conf)
+
+			if tt.wantErr {
+				if !strings.Contains(memLog.String(), tt.expectedErr) {
+					t.Fatalf("Expected not in log, '%v' not in '%v'", tt.expectedErr, memLog.String())
+				}
+			} else {
+				for i := 0; i < conf.total; i++ {
+					<-output
+				}
+			}
+		})
+	}
+}
diff --git a/pkg/filecloser/filecloser_test.go b/pkg/filecloser/filecloser_test.go
index b2d9493..71004bd 100644
--- a/pkg/filecloser/filecloser_test.go
+++ b/pkg/filecloser/filecloser_test.go
@@ -122,7 +122,7 @@ func Test_worker(t *testing.T) {
 			worker(ctx, &conf)
 
 			if !strings.Contains(memLog.String(), tt.expected) {
-				t.Fatalf("Expected not in log, '%v' not in '%vs'", tt.expected, memLog.String())
+				t.Fatalf("Expected not in log, '%v' not in '%v'", tt.expected, memLog.String())
 			}
 		})
 	}
diff --git a/pkg/filereader/filereader_test.go b/pkg/filereader/filereader_test.go
index ca5cc02..47ad280 100644
--- a/pkg/filereader/filereader_test.go
+++ b/pkg/filereader/filereader_test.go
@@ -118,31 +118,8 @@ func Test_worker(t *testing.T) {
 			worker(ctx, tt.args.conf)
 
 			if !strings.Contains(memLog.String(), tt.expected) {
-				t.Fatalf("Expected not in log, '%v' not in '%vs'", tt.expected, memLog.String())
+				t.Fatalf("Expected not in log, '%v' not in '%v'", tt.expected, memLog.String())
t.Fatalf("Expected not in log, '%v' not in '%v'", tt.expected, memLog.String()) } }) } } - -func TestCreateFileReader(t *testing.T) { - type args struct { - ctx context.Context - db *gorm.DB - chunksize int - required int - input chan database.File - output chan *structs.Chunk - workercount int - } - tests := []struct { - name string - args args - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - CreateFileReader(tt.args.ctx, tt.args.db, tt.args.chunksize, tt.args.required, tt.args.input, tt.args.output, tt.args.workercount) - }) - } -} diff --git a/pkg/sender/sender.go b/pkg/sender/sender.go index 6c62b82..28cb5ad 100644 --- a/pkg/sender/sender.go +++ b/pkg/sender/sender.go @@ -24,7 +24,7 @@ func Sender(ctx context.Context, db *gorm.DB, conf config.Config) { queuereader.CreateQueueReader(ctx, db, queue_chan) filereader.CreateFileReader(ctx, db, conf.ChunkSize, conf.ChunkFecRequired, queue_chan, chunks_chan, maxprocs) - fecencoder.CreateFecEncoder(ctx, conf.ChunkSize, conf.ChunkFecRequired, conf.ChunkFecTotal, chunks_chan, shares_chan, maxprocs) + fecencoder.CreateFecEncoder(ctx, conf.ChunkFecRequired, conf.ChunkFecTotal, chunks_chan, shares_chan, maxprocs) bandwidthlimiter.CreateBandwidthLimiter(ctx, conf.BandwidthLimit, conf.ChunkSize, shares_chan, bw_limited_chunks, maxprocs) udpsender.CreateUdpSender(ctx, conf.ReceiverIP, conf.ReceiverPort, bw_limited_chunks, maxprocs) } diff --git a/pkg/structs/structs.go b/pkg/structs/structs.go index 713037c..9d990c1 100644 --- a/pkg/structs/structs.go +++ b/pkg/structs/structs.go @@ -12,7 +12,7 @@ import ( "github.com/zhuangsirui/binpacker" ) -const HASHSIZE = 32 // Using the sha256.Size as const directly causes linting issues +const HASHSIZE = sha256.Size func HashFile(f *os.File, encrypted bool) ([HASHSIZE]byte, error) { var ret [HASHSIZE]byte @@ -35,7 +35,7 @@ func HashFile(f *os.File, encrypted bool) ([HASHSIZE]byte, error) { type Chunk struct { Path string - Hash [HASHSIZE]byte + Hash [32]byte // Not using the HASHSIZE const as it causes linting issues Encrypted bool DataOffset int64 DataPadding uint32 diff --git a/pkg/udpsender/udpsender_test.go b/pkg/udpsender/udpsender_test.go new file mode 100644 index 0000000..95bb24f --- /dev/null +++ b/pkg/udpsender/udpsender_test.go @@ -0,0 +1,78 @@ +package udpsender + +import ( + "bytes" + "context" + "crypto/rand" + "math/big" + "net" + "oneway-filesync/pkg/structs" + "strings" + "testing" + "time" + + "github.com/sirupsen/logrus" +) + +func randint(max int64) int { + nBig, err := rand.Int(rand.Reader, big.NewInt(max)) + if err != nil { + panic(err) + } + return int(nBig.Int64()) +} + +func Test_worker(t *testing.T) { + ip := "127.0.0.1" + port := randint(30000) + 30000 + addr := net.UDPAddr{ + IP: net.ParseIP(ip), + Port: port, + } + + receiving_conn, err := net.ListenUDP("udp", &addr) + if err != nil { + t.Fatal(err) + } + defer receiving_conn.Close() + + type args struct { + ip string + port int + chunk structs.Chunk + } + tests := []struct { + name string + args args + wantErr bool + expectedErr string + }{ + {"test-works", args{ip, port, structs.Chunk{}}, false, ""}, + {"test-socket-err", args{ip, 88888, structs.Chunk{}}, true, "Error creating udp socket"}, + {"test-message-too-long", args{ip, port, structs.Chunk{Data: make([]byte, 100*1024)}}, true, "Error sending share: write udp"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var memLog bytes.Buffer + logrus.SetOutput(&memLog) + + input := make(chan 
+			input <- &tt.args.chunk
+			conf := udpSenderConfig{tt.args.ip, tt.args.port, input}
+			ctx, cancel := context.WithCancel(context.Background())
+			go func() {
+				time.Sleep(2 * time.Second)
+				cancel()
+			}()
+			worker(ctx, &conf)
+
+			if tt.wantErr {
+				if !strings.Contains(memLog.String(), tt.expectedErr) {
+					t.Fatalf("Expected not in log, '%v' not in '%v'", tt.expectedErr, memLog.String())
+				}
+			} else {
+				_, _ = receiving_conn.Read(make([]byte, 8192))
+			}
+		})
+	}
+}
diff --git a/pkg/watcher/watcher.go b/pkg/watcher/watcher.go
index d0baf4e..b25f499 100644
--- a/pkg/watcher/watcher.go
+++ b/pkg/watcher/watcher.go
@@ -50,7 +50,7 @@ func worker(ctx context.Context, conf *watcherConfig) {
 				delete(conf.cache, path)
 				err := database.QueueFileForSending(conf.db, path, conf.encrypted)
 				if err != nil {
-					logrus.Errorf("%v", err)
+					logrus.Errorf("Failed to queue file for sending: %v", err)
 				} else {
 					logrus.Infof("File '%s' queued for sending", path)
 				}
diff --git a/pkg/watcher/watcher_test.go b/pkg/watcher/watcher_test.go
new file mode 100644
index 0000000..a9b9d8c
--- /dev/null
+++ b/pkg/watcher/watcher_test.go
@@ -0,0 +1,94 @@
+package watcher
+
+import (
+	"bytes"
+	"context"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/rjeczalik/notify"
+	"github.com/sirupsen/logrus"
+	"gorm.io/driver/sqlite"
+	"gorm.io/gorm"
+	gormlogger "gorm.io/gorm/logger"
+)
+
+func Test_isDirectory(t *testing.T) {
+	type args struct {
+		path string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    bool
+		wantErr bool
+	}{
+		{"test-works", args{"."}, true, false},
+		{"test-non-existent", args{"nonexistentdir"}, false, true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := isDirectory(tt.args.path)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("isDirectory() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if got != tt.want {
+				t.Errorf("isDirectory() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestCreateWatcher_baddir(t *testing.T) {
+	var memLog bytes.Buffer
+	logrus.SetOutput(&memLog)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	CreateWatcher(ctx, &gorm.DB{}, "nonexistentdir", false, make(chan notify.EventInfo, 5))
+	cancel()
+
+	if !strings.Contains(memLog.String(), "Failed to watch dir with error") {
+		t.Fatalf("Expected not in log, '%v' not in '%v'", "Failed to watch dir with error", memLog.String())
+	}
+}
+
+func Test_worker_baddb(t *testing.T) {
+	var memLog bytes.Buffer
+	logrus.SetOutput(&memLog)
+
+	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{Logger: gormlogger.Discard})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	conf := watcherConfig{
+		db:        db,
+		encrypted: false,
+		input:     make(chan notify.EventInfo, 5),
+		cache:     make(map[string]time.Time),
+	}
+
+	if err := notify.Watch(filepath.Join(".", "..."), conf.input, notify.Write, notify.Create); err != nil {
+		t.Fatal(err)
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() {
+		err = os.WriteFile("testfile", make([]byte, 20), os.ModePerm)
+		if err != nil {
+			t.Error(err)
+		}
+		defer os.Remove("testfile")
+		time.Sleep(60 * time.Second)
+		cancel()
+	}()
+	worker(ctx, &conf)
+
+	if !strings.Contains(memLog.String(), "Failed to queue file for sending:") {
+		t.Fatalf("Expected not in log, '%v' not in '%v'", "Failed to queue file for sending:", memLog.String())
+	}
+}
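
Note on the test pattern this patch uses throughout (illustrative, not part of the patch): each worker under test blocks on its input channel until its context is cancelled, so every new test captures logrus output in an in-memory buffer, cancels the context from a goroutine after a short sleep, and then asserts against the captured log. A minimal self-contained sketch of that harness follows; runWorkerBriefly and assertLogged are hypothetical helper names, not identifiers from this repository.

// Sketch of the harness pattern shared by the new tests (hypothetical
// helpers, assuming only the standard library and logrus).
package example

import (
	"bytes"
	"context"
	"strings"
	"testing"
	"time"

	"github.com/sirupsen/logrus"
)

// runWorkerBriefly redirects logrus into an in-memory buffer, runs the
// blocking work function under a context that is cancelled after d, and
// returns whatever was logged while it ran.
func runWorkerBriefly(d time.Duration, work func(ctx context.Context)) string {
	var memLog bytes.Buffer
	logrus.SetOutput(&memLog)

	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(d)
		cancel()
	}()
	work(ctx) // blocks until the delayed cancel fires

	return memLog.String()
}

// assertLogged fails the test if the expected fragment never made it into
// the captured log, matching the t.Fatalf style used in the patch.
func assertLogged(t *testing.T, log string, want string) {
	t.Helper()
	if !strings.Contains(log, want) {
		t.Fatalf("Expected not in log, '%v' not in '%v'", want, log)
	}
}

A table-driven case then reduces to, e.g.:
assertLogged(t, runWorkerBriefly(2*time.Second, func(ctx context.Context) { worker(ctx, &conf) }), tt.expectedErr)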