1
+
package provisionerdserver_test
2
+
3
+
import (
4
+
"context"
5
+
crand "crypto/rand"
6
+
"fmt"
7
+
"testing"
8
+
9
+
"github.com/google/uuid"
10
+
"github.com/stretchr/testify/require"
11
+
"golang.org/x/xerrors"
12
+
"storj.io/drpc"
13
+
14
+
"github.com/coder/coder/v2/coderd/database"
15
+
"github.com/coder/coder/v2/coderd/externalauth"
16
+
"github.com/coder/coder/v2/codersdk/drpcsdk"
17
+
proto "github.com/coder/coder/v2/provisionerd/proto"
18
+
sdkproto "github.com/coder/coder/v2/provisionersdk/proto"
19
+
"github.com/coder/coder/v2/testutil"
20
+
)
21
+
22
+
// TestUploadFileLargeModuleFiles tests the UploadFile RPC with large module files
23
+
func TestUploadFileLargeModuleFiles(t *testing.T) {
24
+
t.Parallel()
25
+
26
+
ctx := testutil.Context(t, testutil.WaitMedium)
27
+
28
+
// Create server
29
+
server, db, _, _ := setup(t, false, &overrides{
30
+
externalAuthConfigs: []*externalauth.Config{{}},
31
+
})
32
+
33
+
testSizes := []int{
34
+
0, // Empty file
35
+
512, // A small file
36
+
drpcsdk.MaxMessageSize + 1024, // Just over the limit
37
+
drpcsdk.MaxMessageSize * 2, // 2x the limit
38
+
sdkproto.ChunkSize*3 + 512, // Multiple chunks with partial last
39
+
}
40
+
41
+
for _, size := range testSizes {
42
+
t.Run(fmt.Sprintf("size_%d_bytes", size), func(t *testing.T) {
43
+
t.Parallel()
44
+
45
+
// Generate test module files data
46
+
moduleData := make([]byte, size)
47
+
_, err := crand.Read(moduleData)
48
+
require.NoError(t, err)
49
+
50
+
// Convert to upload format
51
+
upload, chunks := sdkproto.BytesToDataUpload(sdkproto.DataUploadType_UPLOAD_TYPE_MODULE_FILES, moduleData)
52
+
53
+
stream := newMockUploadStream(upload, chunks...)
54
+
55
+
// Execute upload
56
+
err = server.UploadFile(stream)
57
+
require.NoError(t, err)
58
+
59
+
// Upload should be done
60
+
require.True(t, stream.isDone(), "stream should be done after upload")
61
+
62
+
// Verify file was stored in database
63
+
hashString := fmt.Sprintf("%x", upload.DataHash)
64
+
file, err := db.GetFileByHashAndCreator(ctx, database.GetFileByHashAndCreatorParams{
65
+
Hash: hashString,
66
+
CreatedBy: uuid.Nil, // Provisionerd creates with Nil UUID
67
+
})
68
+
require.NoError(t, err)
69
+
require.Equal(t, hashString, file.Hash)
70
+
require.Equal(t, moduleData, file.Data)
71
+
require.Equal(t, "application/x-tar", file.Mimetype)
72
+
73
+
// Try to upload it again, and it should still be successful
74
+
stream = newMockUploadStream(upload, chunks...)
75
+
err = server.UploadFile(stream)
76
+
require.NoError(t, err, "re-upload should succeed without error")
77
+
require.True(t, stream.isDone(), "stream should be done after re-upload")
78
+
})
79
+
}
80
+
}
81
+
82
+
// TestUploadFileErrorScenarios tests various error conditions in file upload
83
+
func TestUploadFileErrorScenarios(t *testing.T) {
84
+
t.Parallel()
85
+
86
+
//nolint:dogsled
87
+
server, _, _, _ := setup(t, false, &overrides{
88
+
externalAuthConfigs: []*externalauth.Config{{}},
89
+
})
90
+
91
+
// Generate test data
92
+
moduleData := make([]byte, sdkproto.ChunkSize*2)
93
+
_, err := crand.Read(moduleData)
94
+
require.NoError(t, err)
95
+
96
+
upload, chunks := sdkproto.BytesToDataUpload(sdkproto.DataUploadType_UPLOAD_TYPE_MODULE_FILES, moduleData)
97
+
98
+
t.Run("chunk_before_upload", func(t *testing.T) {
99
+
t.Parallel()
100
+
101
+
stream := newMockUploadStream(nil, chunks[0])
102
+
103
+
err := server.UploadFile(stream)
104
+
require.ErrorContains(t, err, "unexpected chunk piece while waiting for file upload")
105
+
require.True(t, stream.isDone(), "stream should be done after error")
106
+
})
107
+
108
+
t.Run("duplicate_upload", func(t *testing.T) {
109
+
t.Parallel()
110
+
111
+
stream := &mockUploadStream{
112
+
done: make(chan struct{}),
113
+
messages: make(chan *proto.UploadFileRequest, 2),
114
+
}
115
+
116
+
up := &proto.UploadFileRequest{Type: &proto.UploadFileRequest_DataUpload{DataUpload: upload}}
117
+
118
+
// Send it twice
119
+
stream.messages <- up
120
+
stream.messages <- up
121
+
122
+
err := server.UploadFile(stream)
123
+
require.ErrorContains(t, err, "unexpected file upload while waiting for file completion")
124
+
require.True(t, stream.isDone(), "stream should be done after error")
125
+
})
126
+
127
+
t.Run("unsupported_upload_type", func(t *testing.T) {
128
+
t.Parallel()
129
+
130
+
//nolint:govet // Ignore lock copy
131
+
cpy := *upload
132
+
cpy.UploadType = sdkproto.DataUploadType_UPLOAD_TYPE_UNKNOWN // Set to an unsupported type
133
+
stream := newMockUploadStream(&cpy, chunks...)
134
+
135
+
err := server.UploadFile(stream)
136
+
require.ErrorContains(t, err, "unsupported file upload type")
137
+
require.True(t, stream.isDone(), "stream should be done after error")
138
+
})
139
+
}
140
+
141
+
type mockUploadStream struct {
142
+
done chan struct{}
143
+
messages chan *proto.UploadFileRequest
144
+
}
145
+
146
+
func (m mockUploadStream) SendAndClose(empty *proto.Empty) error {
147
+
close(m.done)
148
+
return nil
149
+
}
150
+
151
+
func (m mockUploadStream) Recv() (*proto.UploadFileRequest, error) {
152
+
msg, ok := <-m.messages
153
+
if !ok {
154
+
return nil, xerrors.New("no more messages to receive")
155
+
}
156
+
return msg, nil
157
+
}
158
+
func (*mockUploadStream) Context() context.Context { panic(errUnimplemented) }
159
+
func (*mockUploadStream) MsgSend(msg drpc.Message, enc drpc.Encoding) error {
160
+
panic(errUnimplemented)
161
+
}
162
+
163
+
func (*mockUploadStream) MsgRecv(msg drpc.Message, enc drpc.Encoding) error {
164
+
panic(errUnimplemented)
165
+
}
166
+
func (*mockUploadStream) CloseSend() error { panic(errUnimplemented) }
167
+
func (*mockUploadStream) Close() error { panic(errUnimplemented) }
168
+
func (m *mockUploadStream) isDone() bool {
169
+
select {
170
+
case <-m.done:
171
+
return true
172
+
default:
173
+
return false
174
+
}
175
+
}
176
+
177
+
func newMockUploadStream(up *sdkproto.DataUpload, chunks ...*sdkproto.ChunkPiece) *mockUploadStream {
178
+
stream := &mockUploadStream{
179
+
done: make(chan struct{}),
180
+
messages: make(chan *proto.UploadFileRequest, 1+len(chunks)),
181
+
}
182
+
if up != nil {
183
+
stream.messages <- &proto.UploadFileRequest{Type: &proto.UploadFileRequest_DataUpload{DataUpload: up}}
184
+
}
185
+
186
+
for _, chunk := range chunks {
187
+
stream.messages <- &proto.UploadFileRequest{Type: &proto.UploadFileRequest_ChunkPiece{ChunkPiece: chunk}}
188
+
}
189
+
close(stream.messages)
190
+
return stream
191
+
}