@@ -13,117 +13,114 @@ var assert = require('assert');
 var schedule = (typeof setImmediate === 'function') ?
   setImmediate : process.nextTick;
 
-function Mux(downstream) {
-  this.newStreams = [];
-  this.oldStreams = [];
-  this.blocked = false;
-  this.scheduledRead = false;
-
-  this.out = downstream;
-  var self = this;
-  downstream.on('drain', function() {
-    self.blocked = false;
-    self._readIncoming();
-  });
-}
-
-// There are 2 states we can be in:
-
-// - waiting for outbound capacity, which will be signalled by a
-// - 'drain' event on the downstream; or,
-
-// - no packets to send, waiting for an inbound buffer to have
-//   packets, which will be signalled by a 'readable' event
-
-// If we write all packets available whenever there is outbound
-// capacity, we will either run out of outbound capacity (`#write`
-// returns false), or run out of packets (all calls to an
-// `inbound.read()` have returned null).
+class Mux {
+  constructor(downstream) {
+    this.newStreams = [];
+    this.oldStreams = [];
+    this.blocked = false;
+    this.scheduledRead = false;
+
+    this.out = downstream;
+    var self = this;
+    downstream.on('drain', function() {
+      self.blocked = false;
+      self._readIncoming();
+    });
+  }
 
-Mux.prototype._readIncoming = function() {
+  // There are 2 states we can be in:
+  // - waiting for outbound capacity, which will be signalled by a
+  // - 'drain' event on the downstream; or,
+  // - no packets to send, waiting for an inbound buffer to have
+  //   packets, which will be signalled by a 'readable' event
+  // If we write all packets available whenever there is outbound
+  // capacity, we will either run out of outbound capacity (`#write`
+  // returns false), or run out of packets (all calls to an
+  // `inbound.read()` have returned null).
+  _readIncoming() {
+
+    // We may be sent here speculatively, if an incoming stream has
+    // become readable
+    if (this.blocked) return;
+
+    var accepting = true;
+    var out = this.out;
+
+    // Try to read a chunk from each stream in turn, until all streams
+    // are empty, or we exhaust our ability to accept chunks.
+    function roundrobin(streams) {
+      var s;
+      while (accepting && (s = streams.shift())) {
+        var chunk = s.read();
+        if (chunk !== null) {
+          accepting = out.write(chunk);
+          streams.push(s);
+        }
+      }
+    }
 
-  // We may be sent here speculatively, if an incoming stream has
-  // become readable
-  if (this.blocked) return;
+    roundrobin(this.newStreams);
+
+    // Either we exhausted the new queues, or we ran out of capacity. If
+    // we ran out of capacity, all the remaining new streams (i.e.,
+    // those with packets left) become old streams. This effectively
+    // prioritises streams that keep their buffers close to empty over
+    // those that are constantly near full.
+    if (accepting) { // all new queues are exhausted, write as many as
+                     // we can from the old streams
+      assert.equal(0, this.newStreams.length);
+      roundrobin(this.oldStreams);
+    }
+    else { // ran out of room
+      assert(this.newStreams.length > 0, "Expect some new streams to remain");
+      Array.prototype.push.apply(this.oldStreams, this.newStreams);
+      this.newStreams = [];
+    }
+    // We may have exhausted all the old queues, or run out of room;
+    // either way, all we need to do is record whether we have capacity
+    // or not, so any speculative reads will know
+    this.blocked = !accepting;
+  }
 
-  var accepting = true;
-  var out = this.out;
+  _scheduleRead() {
+    var self = this;
 
-  // Try to read a chunk from each stream in turn, until all streams
-  // are empty, or we exhaust our ability to accept chunks.
-  function roundrobin(streams) {
-    var s;
-    while (accepting && (s = streams.shift())) {
-      var chunk = s.read();
-      if (chunk !== null) {
-        accepting = out.write(chunk);
-        streams.push(s);
-      }
+    if (!self.scheduledRead) {
+      schedule(function() {
+        self.scheduledRead = false;
+        self._readIncoming();
+      });
+      self.scheduledRead = true;
     }
   }
 
-  roundrobin(this.newStreams);
+  pipeFrom(readable) {
+    var self = this;
 
-  // Either we exhausted the new queues, or we ran out of capacity. If
-  // we ran out of capacity, all the remaining new streams (i.e.,
-  // those with packets left) become old streams. This effectively
-  // prioritises streams that keep their buffers close to empty over
-  // those that are constantly near full.
-
-  if (accepting) { // all new queues are exhausted, write as many as
-                   // we can from the old streams
-    assert.equal(0, this.newStreams.length);
-    roundrobin(this.oldStreams);
-  }
-  else { // ran out of room
-    assert(this.newStreams.length > 0, "Expect some new streams to remain");
-    Array.prototype.push.apply(this.oldStreams, this.newStreams);
-    this.newStreams = [];
-  }
-  // We may have exhausted all the old queues, or run out of room;
-  // either way, all we need to do is record whether we have capacity
-  // or not, so any speculative reads will know
-  this.blocked = !accepting;
-};
-
-Mux.prototype._scheduleRead = function() {
-  var self = this;
-
-  if (!self.scheduledRead) {
-    schedule(function() {
-      self.scheduledRead = false;
-      self._readIncoming();
-    });
-    self.scheduledRead = true;
-  }
-};
+    function enqueue() {
+      self.newStreams.push(readable);
+      self._scheduleRead();
+    }
 
-Mux.prototype.pipeFrom = function(readable) {
-  var self = this;
+    function cleanup() {
+      readable.removeListener('readable', enqueue);
+      readable.removeListener('error', cleanup);
+      readable.removeListener('end', cleanup);
+      readable.removeListener('unpipeFrom', cleanupIfMe);
+    }
+    function cleanupIfMe(dest) {
+      if (dest === self) cleanup();
+    }
 
-  function enqueue() {
-    self.newStreams.push(readable);
-    self._scheduleRead();
+    readable.on('unpipeFrom', cleanupIfMe);
+    readable.on('end', cleanup);
+    readable.on('error', cleanup);
+    readable.on('readable', enqueue);
   }
 
-  function cleanup() {
-    readable.removeListener('readable', enqueue);
-    readable.removeListener('error', cleanup);
-    readable.removeListener('end', cleanup);
-    readable.removeListener('unpipeFrom', cleanupIfMe);
-  }
-  function cleanupIfMe(dest) {
-    if (dest === self) cleanup();
+  unpipeFrom(readable) {
+    readable.emit('unpipeFrom', this);
   }
-
-  readable.on('unpipeFrom', cleanupIfMe);
-  readable.on('end', cleanup);
-  readable.on('error', cleanup);
-  readable.on('readable', enqueue);
-};
-
-Mux.prototype.unpipeFrom = function(readable) {
-  readable.emit('unpipeFrom', this);
-};
+}
 
 module.exports.Mux = Mux;
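For reference, here is a minimal usage sketch of the `Mux` class above (not part of this diff). It relies only on Node core streams; the downstream writable, the source names, and the chunk contents are invented for illustration.

```js
var stream = require('stream');
var Mux = require('./mux').Mux;

// Downstream writable that just logs each chunk it accepts.
var out = new stream.Writable({
  write: function(chunk, _encoding, callback) {
    console.log('out <-', chunk.toString());
    callback();
  }
});

// Two inbound sources. PassThrough emits 'readable' once written to,
// which is the event pipeFrom() listens for.
var a = new stream.PassThrough();
var b = new stream.PassThrough();

var mux = new Mux(out);
mux.pipeFrom(a);
mux.pipeFrom(b);

// Chunks read from each source are written to `out` in round-robin
// fashion, respecting `out`'s backpressure.
a.write('a1');
b.write('b1');
a.write('a2');

// Detaching a source removes its 'readable'/'end'/'error' listeners.
mux.unpipeFrom(a);
```

Each 'readable' event queues the source on `newStreams`; sources still holding data when the downstream's buffer fills up are demoted to `oldStreams`, which is how streams that keep their buffers near empty get priority, as described in the comments on `_readIncoming`.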