Commit

More tests
danlapid authored Sep 6, 2022
1 parent 84fb58c commit eddf16e
Showing 10 changed files with 335 additions and 40 deletions.
87 changes: 87 additions & 0 deletions pkg/fecdecoder/fecdecoder_test.go
@@ -0,0 +1,87 @@
package fecdecoder

import (
    "bytes"
    "context"
    "oneway-filesync/pkg/structs"
    "strings"
    "testing"
    "time"

    "github.com/klauspost/reedsolomon"
    "github.com/sirupsen/logrus"
)

func createChunks(t *testing.T, required int, total int) []*structs.Chunk {
    fec, err := reedsolomon.New(required, total-required)
    if err != nil {
        t.Fatal(err)
    }
    shares, err := fec.Split(make([]byte, 400))
    if err != nil {
        t.Fatal(err)
    }

    // Encode the parity set
    err = fec.Encode(shares)
    if err != nil {
        t.Fatal(err)
    }
    chunks := make([]*structs.Chunk, total)
    for i, sharedata := range shares {
        chunks[i] = &structs.Chunk{
            ShareIndex: uint32(i),
            Data:       sharedata,
        }
    }
    return chunks
}

func Test_worker(t *testing.T) {
    type args struct {
        required int
        total    int
        input    []*structs.Chunk
    }
    tests := []struct {
        name        string
        args        args
        wantErr     bool
        expectedErr string
    }{
        {"test-works", args{2, 4, createChunks(t, 2, 4)}, false, ""},
        {"test-too-few-shards", args{4, 8, createChunks(t, 4, 8)[:3]}, true, "Error FEC decoding shares: too few shards given"},
        {"test-invalid-fec1", args{2, 1, make([]*structs.Chunk, 4)}, true, "Error creating fec object: cannot create Encoder with less than one data shard or less than zero parity shards"},
        {"test-invalid-fec2", args{0, 1, make([]*structs.Chunk, 4)}, true, "Error creating fec object: cannot create Encoder with less than one data shard or less than zero parity shards"},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            var memLog bytes.Buffer
            logrus.SetOutput(&memLog)

            input := make(chan []*structs.Chunk, 5)
            output := make(chan *structs.Chunk, 5)

            input <- tt.args.input

            conf := fecDecoderConfig{tt.args.required, tt.args.total, input, output}
            ctx, cancel := context.WithCancel(context.Background())
            go func() {
                time.Sleep(2 * time.Second)
                cancel()
            }()
            worker(ctx, &conf)

            if tt.wantErr {
                if !strings.Contains(memLog.String(), tt.expectedErr) {
                    t.Fatalf("Expected not in log, '%v' not in '%v'", tt.expectedErr, memLog.String())
                }
            } else {
                <-output
            }
        })
    }
}
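
A note on what these tests rely on: createChunks builds a complete Reed-Solomon share set, and the payload is only recoverable while at least required of the total shares survive, which is why handing the worker 3 shares of a 4-of-8 set must fail. Below is a minimal standalone sketch of that property using klauspost/reedsolomon directly; it is an illustration written for this commentary, not code from this commit.

package main

import (
    "fmt"

    "github.com/klauspost/reedsolomon"
)

func main() {
    // 4 data shards + 4 parity shards: any 4 of the 8 can rebuild the rest.
    enc, err := reedsolomon.New(4, 4)
    if err != nil {
        panic(err)
    }
    shards, err := enc.Split(make([]byte, 400)) // 4 data shards of 100 bytes each
    if err != nil {
        panic(err)
    }
    if err := enc.Encode(shards); err != nil { // fills in the 4 parity shards
        panic(err)
    }

    // Lose 4 of the 8 shares (nil marks a missing shard): still recoverable.
    shards[0], shards[2], shards[5], shards[7] = nil, nil, nil, nil
    fmt.Println(enc.Reconstruct(shards)) // <nil>

    // With only 3 shares left, reconstruction is impossible.
    shards[0], shards[1], shards[3], shards[4], shards[6] = nil, nil, nil, nil, nil
    fmt.Println(enc.Reconstruct(shards)) // too few shards given
}

The error in the final step is reedsolomon.ErrTooFewShards, whose text is exactly the "too few shards given" substring the test asserts against the log.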
20 changes: 9 additions & 11 deletions pkg/fecencoder/fecencoder.go
@@ -10,11 +10,10 @@ import (
 )
 
 type fecEncoderConfig struct {
-    chunksize int
-    required  int
-    total     int
-    input     chan *structs.Chunk
-    output    chan *structs.Chunk
+    required int
+    total    int
+    input    chan *structs.Chunk
+    output   chan *structs.Chunk
 }
 
 // FEC routine:
@@ -71,13 +70,12 @@ func worker(ctx context.Context, conf *fecEncoderConfig) {
     }
 }
 
-func CreateFecEncoder(ctx context.Context, chunksize int, required int, total int, input chan *structs.Chunk, output chan *structs.Chunk, workercount int) {
+func CreateFecEncoder(ctx context.Context, required int, total int, input chan *structs.Chunk, output chan *structs.Chunk, workercount int) {
     conf := fecEncoderConfig{
-        chunksize: chunksize,
-        required:  required,
-        total:     total,
-        input:     input,
-        output:    output,
+        required: required,
+        total:    total,
+        input:    input,
+        output:   output,
     }
     for i := 0; i < workercount; i++ {
         go worker(ctx, &conf)
61 changes: 61 additions & 0 deletions pkg/fecencoder/fecencoder_test.go
@@ -0,0 +1,61 @@
package fecencoder

import (
    "bytes"
    "context"
    "oneway-filesync/pkg/structs"
    "strings"
    "testing"
    "time"

    "github.com/sirupsen/logrus"
)

func Test_worker(t *testing.T) {
    type args struct {
        required int
        total    int
        input    *structs.Chunk
    }
    tests := []struct {
        name        string
        args        args
        wantErr     bool
        expectedErr string
    }{
        {"test-works", args{2, 4, &structs.Chunk{Data: make([]byte, 400)}}, false, ""},
        {"test-shortdata1", args{2, 4, &structs.Chunk{Data: make([]byte, 0)}}, true, "Error splitting chunk: not enough data to fill the number of requested shards"},
        {"test-invalid-fec1", args{2, 1, &structs.Chunk{}}, true, "Error creating fec object: cannot create Encoder with less than one data shard or less than zero parity shards"},
        {"test-invalid-fec2", args{0, 1, &structs.Chunk{}}, true, "Error creating fec object: cannot create Encoder with less than one data shard or less than zero parity shards"},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            var memLog bytes.Buffer
            logrus.SetOutput(&memLog)

            input := make(chan *structs.Chunk, 5)
            output := make(chan *structs.Chunk, 5)

            input <- tt.args.input

            conf := fecEncoderConfig{tt.args.required, tt.args.total, input, output}
            ctx, cancel := context.WithCancel(context.Background())
            go func() {
                time.Sleep(2 * time.Second)
                cancel()
            }()
            worker(ctx, &conf)

            if tt.wantErr {
                if !strings.Contains(memLog.String(), tt.expectedErr) {
                    t.Fatalf("Expected not in log, '%v' not in '%v'", tt.expectedErr, memLog.String())
                }
            } else {
                for i := 0; i < conf.total; i++ {
                    <-output
                }
            }
        })
    }
}
2 changes: 1 addition & 1 deletion pkg/filecloser/filecloser_test.go
@@ -122,7 +122,7 @@ func Test_worker(t *testing.T) {
             worker(ctx, &conf)
 
             if !strings.Contains(memLog.String(), tt.expected) {
-                t.Fatalf("Expected not in log, '%v' not in '%vs'", tt.expected, memLog.String())
+                t.Fatalf("Expected not in log, '%v' not in '%v'", tt.expected, memLog.String())
             }
         })
     }
25 changes: 1 addition & 24 deletions pkg/filereader/filereader_test.go
@@ -118,31 +118,8 @@ func Test_worker(t *testing.T) {
             worker(ctx, tt.args.conf)
 
             if !strings.Contains(memLog.String(), tt.expected) {
-                t.Fatalf("Expected not in log, '%v' not in '%vs'", tt.expected, memLog.String())
+                t.Fatalf("Expected not in log, '%v' not in '%v'", tt.expected, memLog.String())
             }
         })
     }
 }
-
-func TestCreateFileReader(t *testing.T) {
-    type args struct {
-        ctx         context.Context
-        db          *gorm.DB
-        chunksize   int
-        required    int
-        input       chan database.File
-        output      chan *structs.Chunk
-        workercount int
-    }
-    tests := []struct {
-        name string
-        args args
-    }{
-        // TODO: Add test cases.
-    }
-    for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
-            CreateFileReader(tt.args.ctx, tt.args.db, tt.args.chunksize, tt.args.required, tt.args.input, tt.args.output, tt.args.workercount)
-        })
-    }
-}
2 changes: 1 addition & 1 deletion pkg/sender/sender.go
@@ -24,7 +24,7 @@ func Sender(ctx context.Context, db *gorm.DB, conf config.Config) {
 
     queuereader.CreateQueueReader(ctx, db, queue_chan)
     filereader.CreateFileReader(ctx, db, conf.ChunkSize, conf.ChunkFecRequired, queue_chan, chunks_chan, maxprocs)
-    fecencoder.CreateFecEncoder(ctx, conf.ChunkSize, conf.ChunkFecRequired, conf.ChunkFecTotal, chunks_chan, shares_chan, maxprocs)
+    fecencoder.CreateFecEncoder(ctx, conf.ChunkFecRequired, conf.ChunkFecTotal, chunks_chan, shares_chan, maxprocs)
     bandwidthlimiter.CreateBandwidthLimiter(ctx, conf.BandwidthLimit, conf.ChunkSize, shares_chan, bw_limited_chunks, maxprocs)
     udpsender.CreateUdpSender(ctx, conf.ReceiverIP, conf.ReceiverPort, bw_limited_chunks, maxprocs)
 }
4 changes: 2 additions & 2 deletions pkg/structs/structs.go
@@ -12,7 +12,7 @@ import (
     "github.com/zhuangsirui/binpacker"
 )
 
-const HASHSIZE = 32 // Using the sha256.Size as const directly causes linting issues
+const HASHSIZE = sha256.Size
 
 func HashFile(f *os.File, encrypted bool) ([HASHSIZE]byte, error) {
     var ret [HASHSIZE]byte
@@ -35,7 +35,7 @@ func HashFile(f *os.File, encrypted bool) ([HASHSIZE]byte, error) {
 
 type Chunk struct {
     Path        string
-    Hash        [HASHSIZE]byte
+    Hash        [32]byte // Not using the HASHSIZE const as it causes linting issues
     Encrypted   bool
     DataOffset  int64
     DataPadding uint32
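
Side note on the constant swap above: sha256.Size is the untyped constant 32, so [sha256.Size]byte and [32]byte denote the same array type; only the linter's view of the two spellings differs. A quick standalone check (written for this commentary, not part of the commit):

package main

import (
    "crypto/sha256"
    "fmt"
)

func main() {
    // sha256.Sum256 returns [sha256.Size]byte, which assigns
    // freely to [32]byte because the types are identical.
    var h [32]byte = sha256.Sum256([]byte("hello"))
    fmt.Println(sha256.Size) // 32
    fmt.Printf("%x\n", h)
}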
78 changes: 78 additions & 0 deletions pkg/udpsender/udpsender_test.go
@@ -0,0 +1,78 @@
package udpsender

import (
    "bytes"
    "context"
    "crypto/rand"
    "math/big"
    "net"
    "oneway-filesync/pkg/structs"
    "strings"
    "testing"
    "time"

    "github.com/sirupsen/logrus"
)

func randint(max int64) int {
    nBig, err := rand.Int(rand.Reader, big.NewInt(max))
    if err != nil {
        panic(err)
    }
    return int(nBig.Int64())
}

func Test_worker(t *testing.T) {
    ip := "127.0.0.1"
    port := randint(30000) + 30000
    addr := net.UDPAddr{
        IP:   net.ParseIP(ip),
        Port: port,
    }

    receiving_conn, err := net.ListenUDP("udp", &addr)
    if err != nil {
        t.Fatal(err)
    }
    defer receiving_conn.Close()

    type args struct {
        ip    string
        port  int
        chunk structs.Chunk
    }
    tests := []struct {
        name        string
        args        args
        wantErr     bool
        expectedErr string
    }{
        {"test-works", args{ip, port, structs.Chunk{}}, false, ""},
        {"test-socket-err", args{ip, 88888, structs.Chunk{}}, true, "Error creating udp socket"},
        {"test-message-too-long", args{ip, port, structs.Chunk{Data: make([]byte, 100*1024)}}, true, "Error sending share: write udp"},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            var memLog bytes.Buffer
            logrus.SetOutput(&memLog)

            input := make(chan *structs.Chunk, 5)
            input <- &tt.args.chunk
            conf := udpSenderConfig{tt.args.ip, tt.args.port, input}
            ctx, cancel := context.WithCancel(context.Background())
            go func() {
                time.Sleep(2 * time.Second)
                cancel()
            }()
            worker(ctx, &conf)

            if tt.wantErr {
                if !strings.Contains(memLog.String(), tt.expectedErr) {
                    t.Fatalf("Expected not in log, '%v' not in '%v'", tt.expectedErr, memLog.String())
                }
            } else {
                _, _ = receiving_conn.Read(make([]byte, 8192))
            }
        })
    }
}
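
The two failing cases here exercise plain UDP limits rather than anything specific to the worker: 88888 lies outside the valid 0-65535 port range, so the socket cannot be created, and a 100 KiB payload exceeds what a single UDP datagram can carry (65,535 bytes minus IP/UDP headers, roughly 65,507 bytes over IPv4). A standalone sketch of both failures follows; it is an illustration only, and the worker's actual socket handling lives in udpsender.go.

package main

import (
    "fmt"
    "net"
)

func main() {
    // Port out of range: the dial fails before anything is sent.
    _, err := net.Dial("udp", "127.0.0.1:88888")
    fmt.Println(err) // address 127.0.0.1:88888: invalid port

    // The dial itself succeeds (UDP is connectionless), but the
    // oversized write is rejected by the kernel with EMSGSIZE.
    conn, err := net.Dial("udp", "127.0.0.1:9999")
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    _, err = conn.Write(make([]byte, 100*1024))
    fmt.Println(err) // write udp ...: message too long
}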
2 changes: 1 addition & 1 deletion pkg/watcher/watcher.go
@@ -50,7 +50,7 @@ func worker(ctx context.Context, conf *watcherConfig) {
             delete(conf.cache, path)
             err := database.QueueFileForSending(conf.db, path, conf.encrypted)
             if err != nil {
-                logrus.Errorf("%v", err)
+                logrus.Errorf("Failed to queue file for sending: %v", err)
             } else {
                 logrus.Infof("File '%s' queued for sending", path)
             }