forked from anacrolix/torrent
-
Notifications
You must be signed in to change notification settings - Fork 0
/
reader.go
332 lines (301 loc) · 8.46 KB
/
reader.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
package torrent
import (
"context"
"errors"
"fmt"
"io"
"sync"
"github.com/anacrolix/log"
"github.com/anacrolix/missinggo/v2"
)
// Reader accesses Torrent data via a Client. Reads block until the data is
// available. Seeks and readahead also drive Client behaviour (piece
// prioritization happens as a side effect of reading and seeking).
type Reader interface {
	io.ReadSeekCloser
	missinggo.ReadContexter
	// Configure the number of bytes ahead of a read that should also be prioritized in preparation
	// for further reads. Overridden by a non-nil readahead func, see SetReadaheadFunc.
	SetReadahead(int64)
	// If non-nil, the provided function is called when the implementation needs to know the
	// readahead for the current reader. Calls occur during Reads and Seeks, and while the Client is
	// locked.
	SetReadaheadFunc(ReadaheadFunc)
	// Don't wait for pieces to complete and be verified. Read calls return as soon as they can when
	// the underlying chunks become available.
	SetResponsive()
}
// pieceRange is a half-open range of piece indices, [begin, end). The zero
// value means "no pieces".
type pieceRange struct {
	begin, end pieceIndex
}
// ReadaheadContext carries the reader state a ReadaheadFunc may consult when
// deciding how far ahead to prioritize.
type ReadaheadContext struct {
	// Position that reads have continued contiguously from (reset on seek).
	ContiguousReadStartPos int64
	// The reader's current position.
	CurrentPos int64
}
// ReadaheadFunc returns the desired readahead, in bytes, for a Reader.
type ReadaheadFunc func(ReadaheadContext) int64
// reader is the concrete Reader implementation, backed by a Torrent.
type reader struct {
	t *Torrent
	// Adjust the read/seek window to handle Readers locked to File extents and the like.
	offset, length int64
	// Function to dynamically calculate readahead. If nil, readahead is static.
	readaheadFunc ReadaheadFunc
	// Required when modifying pos and readahead.
	mu             sync.Locker
	readahead, pos int64
	// Position that reads have continued contiguously from.
	contiguousReadStartPos int64
	// The cached piece range this reader wants downloaded. The zero value corresponds to nothing.
	// We cache this so that changes can be detected, and bubbled up to the Torrent only as
	// required.
	pieces pieceRange
	// Reads have been initiated since the last seek. This is used to prevent readaheads occurring
	// after a seek or with a new reader at the starting position.
	reading bool
	// NOTE(review): responsive is written without holding mu (see SetResponsive) — presumably
	// benign as a one-way flag, but confirm against the Client's locking conventions.
	responsive bool
}
var _ io.ReadSeekCloser = (*reader)(nil)
// SetResponsive puts the reader into responsive mode: Read calls return as
// soon as the underlying chunks are available, without waiting for piece
// verification. Broadcasts on the client event so waiters re-evaluate.
func (r *reader) SetResponsive() {
	r.responsive = true
	r.t.cl.event.Broadcast()
}
// SetNonResponsive disables responsive mode, restoring the default behaviour of
// only reading from complete, verified pieces. TODO: Remove?
func (r *reader) SetNonResponsive() {
	r.responsive = false
	r.t.cl.event.Broadcast()
}
// SetReadahead installs a static readahead in bytes, clearing any previously
// installed readahead func, and propagates the resulting priority change.
func (r *reader) SetReadahead(readahead int64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.readahead = readahead
	// A static readahead overrides any dynamic one.
	r.readaheadFunc = nil
	r.posChanged()
}
// SetReadaheadFunc installs a dynamic readahead function (which takes
// precedence over the static readahead) and propagates the resulting priority
// change.
func (r *reader) SetReadaheadFunc(f ReadaheadFunc) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.readaheadFunc = f
	r.posChanged()
}
// available reports how many bytes can be read without blocking, starting at
// reader offset off and capped at max. It walks forward chunk by chunk,
// stopping at the first chunk that isn't present (or whose piece isn't
// complete, unless the reader is responsive).
func (r *reader) available(off, max int64) (ret int64) {
	torOff := off + r.offset
	remaining := max
	for remaining > 0 {
		req, ok := r.t.offsetRequest(torOff)
		if !ok {
			break
		}
		if !r.responsive && !r.t.pieceComplete(pieceIndex(req.Index)) {
			break
		}
		if !r.t.haveChunk(req) {
			break
		}
		// Bytes left in this chunk from torOff to its end.
		chunkLeft := int64(req.Length) - (torOff - r.t.requestOffset(req))
		remaining -= chunkLeft
		ret += chunkLeft
		torOff += chunkLeft
	}
	// The final chunk may have carried us past max; trim the overshoot so ret
	// never exceeds the caller's limit.
	if remaining < 0 {
		ret += remaining
	}
	return
}
// piecesUncached computes the piece range this reader currently wants
// downloaded, ignoring the cached value at r.pieces.
func (r *reader) piecesUncached() (ret pieceRange) {
	ra := r.readahead
	if f := r.readaheadFunc; f != nil {
		ra = f(ReadaheadContext{
			ContiguousReadStartPos: r.contiguousReadStartPos,
			CurrentPos:             r.pos,
		})
	}
	switch {
	case !r.reading:
		// No reads since the last seek: don't want anything ahead yet.
		ra = 0
	case ra < 1:
		// Needs to be at least 1 when reading, because [x, x) means we don't
		// want anything.
		ra = 1
	}
	// Never extend past the end of the reader's window.
	if limit := r.length - r.pos; ra > limit {
		ra = limit
	}
	ret.begin, ret.end = r.t.byteRegionPieces(r.torrentOffset(r.pos), ra)
	return
}
// Read implements io.Reader by delegating to ReadContext with a background
// (non-cancellable) context.
func (r *reader) Read(b []byte) (n int, err error) {
	return r.ReadContext(context.Background(), b)
}
// ReadContext reads into b, blocking until data is available or ctx is done.
// Advances the reader position and pushes the resulting piece-priority change
// to the Client. Converts position-at-or-past-end into io.EOF, and a premature
// storage EOF into io.ErrUnexpectedEOF.
func (r *reader) ReadContext(ctx context.Context, b []byte) (n int, err error) {
	if len(b) > 0 {
		r.reading = true
		// TODO: Rework reader piece priorities so we don't have to push updates in to the Client
		// and take the lock here.
		r.mu.Lock()
		r.posChanged()
		r.mu.Unlock()
	}
	// NOTE(review): r.pos is read here without holding r.mu — confirm whether concurrent
	// Read/Seek on the same reader is supported.
	n, err = r.readOnceAt(ctx, b, r.pos)
	if n == 0 {
		if err == nil && len(b) > 0 {
			// readOnceAt must either make progress or report why it can't.
			panic("expected error")
		} else {
			return
		}
	}
	r.mu.Lock()
	r.pos += int64(n)
	r.posChanged()
	r.mu.Unlock()
	if r.pos >= r.length {
		// Reached the end of the reader's window: report EOF regardless of the
		// underlying read's error.
		err = io.EOF
	} else if err == io.EOF {
		// Storage reported EOF before the window's end.
		err = io.ErrUnexpectedEOF
	}
	return
}
// closedChan is an always-closed channel, used as an always-ready select case
// (see waitAvailable's dontWait).
var closedChan = make(chan struct{})

func init() {
	close(closedChan)
}
// waitAvailable waits until some data should be available to read at reader
// offset pos, tickling the client if it isn't. Returns how much should be
// readable without blocking (capped at wanted). If wait is false or wanted is
// zero, it performs a single availability check and returns without blocking.
// Returns an error if the torrent is closed, the context is done, or
// downloading/networking is disabled.
func (r *reader) waitAvailable(ctx context.Context, pos, wanted int64, wait bool) (avail int64, err error) {
	t := r.t
	for {
		r.t.cl.rLock()
		avail = r.available(pos, wanted)
		// Grab the signal channel for the piece at pos so we can block until
		// something about it changes.
		readerCond := t.piece(int((r.offset + pos) / t.info.PieceLength)).readerCond.Signaled()
		r.t.cl.rUnlock()
		if avail != 0 {
			return
		}
		var dontWait <-chan struct{}
		if !wait || wanted == 0 {
			// Make the select below non-blocking.
			dontWait = closedChan
		}
		select {
		case <-r.t.closed.Done():
			err = errors.New("torrent closed")
			return
		case <-ctx.Done():
			err = ctx.Err()
			return
		case <-r.t.dataDownloadDisallowed.On():
			err = errors.New("torrent data downloading disabled")
			// Fix: this case previously fell through to the next loop
			// iteration without returning, busy-spinning on the closed On()
			// channel for as long as downloading stayed disallowed and never
			// surfacing the error. Return like every other error case.
			return
		case <-r.t.networkingEnabled.Off():
			err = errors.New("torrent networking disabled")
			return
		case <-dontWait:
			return
		case <-readerCond:
			// Piece state changed; re-check availability.
		}
	}
}
// torrentOffset converts a reader-relative position to a torrent-wide offset
// (for example the reader might be constrained to a particular file within the
// torrent).
func (r *reader) torrentOffset(readerPos int64) int64 {
	return r.offset + readerPos
}
// readOnceAt performs at most one successful read from torrent storage at
// reader offset pos. On a storage read error it re-checks piece completion for
// the readahead window (the data we thought we had may be gone) and retries.
// Only blocks in waitAvailable on the first attempt (n == 0).
func (r *reader) readOnceAt(ctx context.Context, b []byte, pos int64) (n int, err error) {
	if pos >= r.length {
		err = io.EOF
		return
	}
	for {
		var avail int64
		avail, err = r.waitAvailable(ctx, pos, int64(len(b)), n == 0)
		if avail == 0 {
			return
		}
		firstPieceIndex := pieceIndex(r.torrentOffset(pos) / r.t.info.PieceLength)
		firstPieceOffset := r.torrentOffset(pos) % r.t.info.PieceLength
		// Don't read past what waitAvailable said is ready.
		b1 := missinggo.LimitLen(b, avail)
		n, err = r.t.readAt(b1, r.torrentOffset(pos))
		if n != 0 {
			// Partial success is success for a single read call.
			err = nil
			return
		}
		if r.t.closed.IsSet() {
			err = fmt.Errorf("reading from closed torrent: %w", err)
			return
		}
		// Storage read failed: fix up completion state under the client lock.
		r.t.cl.lock()
		// I think there's a panic here caused by the Client being closed before obtaining this
		// lock. TestDropTorrentWithMmapStorageWhileHashing seems to tickle occasionally in CI.
		func() {
			// Just add exceptions already.
			defer r.t.cl.unlock()
			if r.t.closed.IsSet() {
				// Can't update because Torrent's piece order is removed from Client.
				return
			}
			// TODO: Just reset pieces in the readahead window. This might help
			// prevent thrashing with small caches and file and piece priorities.
			r.log(log.Fstr("error reading torrent %s piece %d offset %d, %d bytes: %v",
				r.t.infoHash.HexString(), firstPieceIndex, firstPieceOffset, len(b1), err))
			if !r.t.updatePieceCompletion(firstPieceIndex) {
				r.log(log.Fstr("piece %d completion unchanged", firstPieceIndex))
			}
			// Update the rest of the piece completions in the readahead window, without alerting to
			// changes (since only the first piece, the one above, could have generated the read error
			// we're currently handling).
			if r.pieces.begin != firstPieceIndex {
				panic(fmt.Sprint(r.pieces.begin, firstPieceIndex))
			}
			for index := r.pieces.begin + 1; index < r.pieces.end; index++ {
				r.t.updatePieceCompletion(index)
			}
		}()
	}
}
// Close detaches the reader from its Torrent, dropping the piece priorities it
// was responsible for. Always returns nil.
func (r *reader) Close() error {
	cl := r.t.cl
	cl.lock()
	defer cl.unlock()
	r.t.deleteReader(r)
	return nil
}
// posChanged recomputes the reader's wanted piece range and, only if it
// differs from the cached value, pushes the change up to the Torrent.
func (r *reader) posChanged() {
	fresh := r.piecesUncached()
	stale := r.pieces
	if fresh == stale {
		// No change: avoid bothering the Torrent.
		return
	}
	r.pieces = fresh
	r.t.readerPosChanged(stale, fresh)
}
// Seek implements io.Seeker. A seek to a new position cancels the contiguous
// read run (resetting readahead) and propagates the priority change.
func (r *reader) Seek(off int64, whence int) (newPos int64, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	switch whence {
	case io.SeekStart:
		newPos = off
	case io.SeekCurrent:
		newPos = r.pos + off
	case io.SeekEnd:
		newPos = r.length + off
	default:
		return 0, errors.New("bad whence")
	}
	if newPos != r.pos {
		// Position actually moved: reset the contiguous-read tracking so
		// readahead starts over from here.
		r.reading = false
		r.pos = newPos
		r.contiguousReadStartPos = newPos
		r.posChanged()
	}
	return
}
// log emits m at debug level on the Torrent's logger, skipping this frame so
// the call site is attributed correctly.
func (r *reader) log(m log.Msg) {
	r.t.logger.LogLevel(log.Debug, m.Skip(1))
}
// defaultReadaheadFunc reads ahead by as much as has already been read
// contiguously: doubling the window on each sequential read run.
// Implementation inspired by https://news.ycombinator.com/item?id=27019613.
func defaultReadaheadFunc(r ReadaheadContext) int64 {
	return r.CurrentPos - r.ContiguousReadStartPos
}