for( i=0; i<peerCount; ++i )
tr_peerIoRef( peers[i] );
- /* Stop all peers from listening for the socket to be ready for IO.
- * See "Second phase of IO" lower in this function for more info. */
- for( i=0; i<peerCount; ++i )
- tr_peerIoSetEnabled( peers[i], dir, FALSE );
-
/* First phase of IO. Tries to distribute bandwidth fairly to keep faster
* peers from starving the others. Loop through the peers, giving each a
* small chunk of bandwidth. Keep looping until we run out of bandwidth
* This on-demand IO is enabled until (1) the peer runs out of bandwidth,
* or (2) the next tr_bandwidthAllocate() call, when we start over again. */
for( i=0; i<peerCount; ++i )
- if( tr_peerIoHasBandwidthLeft( peers[i], dir ) )
- tr_peerIoSetEnabled( peers[i], dir, TRUE );
+ tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) );
for( i=0; i<peerCount; ++i )
tr_peerIoUnref( peers[i] );
bytesUsed = tr_peerIoTryWrite( io, limit );
}
- dbgmsg( io, "flushing peer-io, direction %d, limit %zu, bytesUsed %d", (int)dir, limit, bytesUsed );
+ dbgmsg( io, "flushing peer-io, hasFinishedConnecting %d, direction %d, limit %zu, bytesUsed %d", (int)io->hasFinishedConnecting, (int)dir, limit, bytesUsed );
return bytesUsed;
}
/**
 * Start libevent polling for the given event(s) on this peer-io.
 * @param io     the peer-io whose read/write events should be armed
 * @param event  bitmask of EV_READ and/or EV_WRITE
 */
static void
event_enable( tr_peerIo * io, short event )
{
    if( event & EV_READ ) {
        dbgmsg( io, "enabling libevent ready-to-read polling" );
        event_add( &io->event_read, NULL ); /* NULL timeout: poll until disabled */
    }

    if( event & EV_WRITE ) {
        dbgmsg( io, "enabling libevent ready-to-write polling" );
        event_add( &io->event_write, NULL );
    }
}
static void
event_disable( struct tr_peerIo * io, short event )
{
- if( event & EV_READ )
+ if( event & EV_READ ) {
+ dbgmsg( io, "disabling libevent ready-to-read polling" );
event_del( &io->event_read );
+ }
- if( event & EV_WRITE )
+ if( event & EV_WRITE ) {
+ dbgmsg( io, "disabling libevent ready-to-write polling" );
event_del( &io->event_write );
+ }
}
size_t tr_peerIoGetWriteBufferSpace( const tr_peerIo * io, uint64_t now );
static TR_INLINE void tr_peerIoSetParent( tr_peerIo * io,
- struct tr_bandwidth * parent )
+ struct tr_bandwidth * parent )
{
assert( tr_isPeerIo( io ) );
int isPieceData );
/**
 * Returns nonzero if the io may use more bandwidth in the given direction.
 *
 * A peer-io that hasn't finished connecting always reports bandwidth left;
 * presumably this is so the connection handshake isn't starved by the
 * bandwidth allocator -- TODO confirm against the caller in bandwidth.c.
 * Once connected, the answer comes from clamping a 1 KiB probe against
 * the io's bandwidth allotment.
 */
static TR_INLINE tr_bool tr_peerIoHasBandwidthLeft( const tr_peerIo  * io,
                                                    tr_direction       dir )
{
    assert( tr_isPeerIo( io ) );

    return !io->hasFinishedConnecting
        || ( tr_bandwidthClamp( &io->bandwidth, dir, 1024 ) > 0 );
}
static TR_INLINE double tr_peerIoGetPieceSpeed( const tr_peerIo * io, uint64_t now, tr_direction dir )