first commit
This commit is contained in:
commit
95a1cc8c91
5
.gitignore
vendored
Normal file
5
.gitignore
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
.DS_Store
|
||||
*.py[co]
|
||||
*.pid
|
||||
*.log
|
||||
django_cache
|
157
BitTorrent/Choker.py
Executable file
157
BitTorrent/Choker.py
Executable file
@ -0,0 +1,157 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen
|
||||
|
||||
from random import randrange
|
||||
from math import sqrt
|
||||
|
||||
class Choker(object):
    """Decides which peer connections get unchoked.

    Every 10 seconds (via the supplied scheduler) a round-robin pass
    re-evaluates which interested peers are allowed to download from us,
    preferring peers that upload to us fastest (tit-for-tat) while
    downloading, and rotating unchokes while seeding.
    """

    def __init__(self, config, schedule, done = lambda: False):
        # config: mapping with 'max_uploads', 'min_uploads' and
        #     'max_upload_rate' entries.
        # schedule: callable(func, delay_seconds) arranging a deferred call.
        # done: callable returning True once the torrent is complete.
        self.config = config
        self.schedule = schedule
        self.connections = []
        self.count = 0                  # number of round-robin ticks so far
        self.done = done
        self.unchokes_since_last = 0    # unchokes caused by disconnects since
                                        # the last forced seed rechoke
        schedule(self._round_robin, 10)

    def _round_robin(self):
        """Periodic tick: rotate the connection list and rechoke."""
        self.schedule(self._round_robin, 10)
        self.count += 1
        if self.done():
            self._rechoke_seed(True)
            return
        if self.count % 3 == 0:
            # every 30s rotate so the first choked-but-interested peer gets
            # to the front and becomes the optimistic-unchoke candidate
            for i in xrange(len(self.connections)):
                u = self.connections[i].upload
                if u.choked and u.interested:
                    self.connections = self.connections[i:] + self.connections[:i]
                    break
        self._rechoke()

    def _rechoke(self):
        """Unchoke the fastest uploaders to us (downloading mode)."""
        if self.done():
            self._rechoke_seed()
            return
        # rank interested, non-snubbed peers that still need pieces by the
        # download rate they give us (fastest first)
        preferred = []
        for i in xrange(len(self.connections)):
            c = self.connections[i]
            if c.upload.interested and not c.download.is_snubbed() and c.download.have.numfalse:
                preferred.append((-c.download.get_rate(), i))
        preferred.sort()
        # keep one slot free for the optimistic unchoke
        prefcount = min(len(preferred), self._max_uploads() - 1)
        mask = [0] * len(self.connections)
        for _, i in preferred[:prefcount]:
            mask[i] = 1
        # remaining slots are given out in list order (optimistic unchokes)
        count = max(1, self.config['min_uploads'] - prefcount)
        for i in xrange(len(self.connections)):
            c = self.connections[i]
            u = c.upload
            if mask[i]:
                u.unchoke(self.count)
            elif count > 0 and c.download.have.numfalse:
                u.unchoke(self.count)
                if u.interested:
                    count -= 1
            else:
                u.choke()

    def _rechoke_seed(self, force_new_unchokes = False):
        """Rechoke while seeding: rotate unchokes among interested peers.

        force_new_unchokes: True on the periodic tick to force a few fresh
            unchokes even if current ones are still active.
        """
        if force_new_unchokes:
            # number of unchokes per 30 second period
            i = (self._max_uploads() + 2) // 3
            # this is called 3 times in 30 seconds, if i==4 then unchoke 1+1+2
            # and so on; subtract unchokes recently triggered by disconnects
            num_force_unchokes = max(0, (i + self.count % 3) // 3 - \
                                     self.unchokes_since_last)
        else:
            num_force_unchokes = 0
        preferred = []
        new_limit = self.count - 3
        for i in xrange(len(self.connections)):
            c = self.connections[i]
            u = c.upload
            if not u.choked and u.interested and c.download.have.numfalse:
                if u.unchoke_time > new_limit or (
                        u.buffer and c.connection.is_flushed()):
                    # recently unchoked, or actively draining data: prefer to
                    # keep it (most recent first, then fastest)
                    preferred.append((-u.unchoke_time, -u.get_rate(), i))
                else:
                    preferred.append((1, -u.get_rate(), i))
        num_kept = self._max_uploads() - num_force_unchokes
        assert num_kept >= 0
        preferred.sort()
        preferred = preferred[:num_kept]
        mask = [0] * len(self.connections)
        for _, _, i in preferred:
            mask[i] = 1
        num_nonpref = self._max_uploads() - len(preferred)
        if force_new_unchokes:
            self.unchokes_since_last = 0
        else:
            self.unchokes_since_last += num_nonpref
        last_unchoked = None
        for i in xrange(len(self.connections)):
            c = self.connections[i]
            u = c.upload
            if not mask[i]:
                if not u.interested:
                    u.choke()
                elif u.choked:
                    if num_nonpref > 0 and c.connection.is_flushed() and c.download.have.numfalse:
                        u.unchoke(self.count)
                        num_nonpref -= 1
                        if num_nonpref == 0:
                            last_unchoked = i
                else:
                    if num_nonpref == 0 or not c.download.have.numfalse:
                        u.choke()
                    else:
                        num_nonpref -= 1
                        if num_nonpref == 0:
                            last_unchoked = i
        if last_unchoked is not None:
            # rotate so the peers after the last unchoked one are considered
            # first next time
            self.connections = self.connections[last_unchoked + 1:] + \
                               self.connections[:last_unchoked + 1]

    def connection_made(self, connection):
        """Insert a new connection at a random position (fairness)."""
        p = randrange(len(self.connections) + 1)
        self.connections.insert(p, connection)

    def connection_lost(self, connection):
        """Drop a connection; rechoke if it was using an upload slot."""
        self.connections.remove(connection)
        if connection.upload.interested and not connection.upload.choked:
            self._rechoke()

    def interested(self, connection):
        # an already-unchoked peer became interested: it now occupies a slot
        if not connection.upload.choked:
            self._rechoke()

    def not_interested(self, connection):
        # an unchoked peer lost interest: its slot can be reassigned
        if not connection.upload.choked:
            self._rechoke()

    def _max_uploads(self):
        """Number of upload slots: explicit config, or guessed from the
        configured upload rate (in kB/s) when 'max_uploads' <= 0."""
        uploads = self.config['max_uploads']
        rate = self.config['max_upload_rate']
        if uploads > 0:
            pass
        elif rate <= 0:
            uploads = 7 # unlimited, just guess something here...
        elif rate < 9:
            uploads = 2
        elif rate < 15:
            uploads = 3
        elif rate < 42:
            uploads = 4
        else:
            uploads = int(sqrt(rate * .6))
        return uploads
|
156
BitTorrent/ClientIdentifier.py
Executable file
156
BitTorrent/ClientIdentifier.py
Executable file
@ -0,0 +1,156 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
#
|
||||
# Written by Matt Chisholm
|
||||
# Client list updated by Ed Savage-Jones - May 28th 2005
|
||||
|
||||
import re
|
||||
|
||||
# Matches one three-character Shadow's-style version field
# (digits, letters, '.' and '-').
v64p = r'[\da-zA-Z.-]{3}'

# (regex, client name) pairs, tried in order against the 20-byte peer id.
# Raw strings are used for every pattern containing regex escapes; patterns
# with real NUL bytes keep ordinary literals.
matches = (
    (r'-AZ(?P<version>\d+)-+.+$'            , "Azureus"              ),
    (r'M(?P<version>\d-\d-\d)--.+$'         , "BitTorrent"           ),
    (r'T(?P<version>%s)-+.+$' % v64p        , "BitTornado"           ),
    (r'-TS(?P<version>\d+)-+.+$'            , "TorrentStorm"         ),
    (r'exbc(?P<bcver>.+)LORD.+$'            , "BitLord"              ),
    (r'exbc(?P<bcver>[^-][^-]+)(?!---).+$'  , "BitComet"             ),
    (r'-BC0(?P<version>\d+)-.+$'            , "BitComet"             ),
    (r'FUTB(?P<bcver>.+).+$'                , "BitComet Mod1"        ),
    (r'xUTB(?P<bcver>.+).+$'                , "BitComet Mod2"        ),
    (r'A(?P<version>%s)-+.+$' % v64p        , "ABC"                  ),
    (r'S(?P<version>%s)-+.+$' % v64p        , "Shadow's"             ),
    (chr(0)*12 + 'aa.+$'                    , "Experimental 3.2.1b2" ),
    (chr(0)*12 + '.+$'                      , "BitTorrent (obsolete)"),
    (r'-G3.+$'                              , "G3Torrent"            ),
    (r'-[Ll][Tt](?P<version>\d+)-+.+$'      , "libtorrent"           ),
    (r'Mbrst(?P<version>\d-\d-\d).+$'       , "burst!"               ),
    (r'eX.+$'                               , "eXeem"                ),
    ('\x00\x02BS.+(?P<strver>UDP0|HTTPBT)$' , "BitSpirit v2"         ),
    ('\x00[\x02|\x00]BS.+$'                 , "BitSpirit v2"         ),
    (r'.*(?P<strver>UDP0|HTTPBT)$'          , "BitSpirit"            ),
    (r'-BOWP?(?P<version>\d+)-.+$'          , "Bits on Wheels"       ),
    (r'(?P<rsver>.+)RSAnonymous.+$'         , "Rufus Anonymous"      ),
    (r'(?P<rsver>.+)RS.+$'                  , "Rufus"                ),
    (r'-ML(?P<version>(\d\.)+\d)(?:\.(?P<strver>CVS))?-+.+$',"MLDonkey"),
    (r'-UT(?P<version>\d+)-+.+$'            , u"\xb5Torrent"         ),
    (r'346------.+$'                        , "TorrentTopia 1.70"    ),
    (r'OP(?P<strver>\d{4}).+$'              , "Opera"                ),
    (r'-S(?P<version>10059)-+.+$'           , "S (unknown)"          ),
    # Clients I've never actually seen in a peer list:
    (r'exbc..---.+$'                        , "BitVampire 1.3.1"     ),
    (r'-BB(?P<version>\d+)-+.+$'            , "BitBuddy"             ),
    (r'-CT(?P<version>\d+)-+.+$'            , "CTorrent"             ),
    (r'-MT(?P<version>\d+)-+.+$'            , "MoonlightTorrent"     ),
    (r'-BX(?P<version>\d+)-+.+$'            , "BitTorrent X"         ),
    (r'-TN(?P<version>\d+)-+.+$'            , "TorrentDotNET"        ),
    (r'-SS(?P<version>\d+)-+.+$'            , "SwarmScope"           ),
    (r'-XT(?P<version>\d+)-+.+$'            , "XanTorrent"           ),
    (r'U(?P<version>\d+)-+.+$'              , "UPnP NAT Bit Torrent" ),
    (r'-AR(?P<version>\d+)-+.+$'            , "Arctic"               ),
    (r'(?P<rsver>.+)BM.+$'                  , "BitMagnet"            ),
    (r'BG(?P<version>\d+).+$'               , "BTGetit"              ),
    (r'-eX(?P<version>[\dA-Fa-f]+)-.+$'     , "eXeem beta"           ),
    (r'Plus12(?P<rc>[\dR]+)-.+$'            , "Plus! II"             ),
    (r'XBT(?P<version>\d+)[d-]-.+$'         , "XBT"                  ),
    (r'-ZT(?P<version>\d+)-+.+$'            , "ZipTorrent"           ),
    (r'-BitE\?(?P<version>\d+)-.+$'         , "BitEruct"             ),
    (r'O(?P<version>%s)-+.+$' % v64p        , "Osprey Permaseed"     ),
    # Guesses based on Rufus source code, never seen in the wild:
    (r'-BS(?P<version>\d+)-+.+$'            , "BTSlave"              ),
    (r'-SB(?P<version>\d+)-+.+$'            , "SwiftBit"             ),
    (r'-SN(?P<version>\d+)-+.+$'            , "ShareNET"             ),
    (r'-bk(?P<version>\d+)-+.+$'            , "BitKitten"            ),
    (r'-SZ(?P<version>\d+)-+.+$'            , "Shareaza"             ),
    (r'-KT(?P<version>\d+)(?P<rc>R\d+)-+.+$', "KTorrent"             ),
    (r'-MP(?P<version>\d+)-+.+$'            , "MooPolice"            ),
    (r'-PO(?P<version>\d+)-+.+$'            , "PO (unknown)"         ),
    (r'-UR(?P<version>\d+)-+.+$'            , "UR (unknown)"         ),
    (r'Deadman Walking-.+$'                 , "Deadman"              ),
    (r'270------.+$'                        , "GreedBT 2.7.0"        ),
    (r'XTORR302.+$'                         , "TorrenTres 0.0.2"     ),
    (r'turbobt(?P<version>\d\.\d).+$'       , "TurboBT"              ),
    (r'DansClient.+$'                       , "XanTorrent"           ),
    # Patterns that should be executed last
    (r'.*Azureus.*'                         , "Azureus 2.0.3.2"      ),
    )

# Compile once at import time; DOTALL because peer ids contain arbitrary bytes.
matches = [(re.compile(pattern, re.DOTALL), name) for pattern, name in matches]

# Peer ids already logged as unidentified, to avoid duplicate log entries.
unknown_clients = {}

def identify_client(peerid, log=None):
    """Guess the client software from a handshake peer id.

    peerid: the 20-byte peer id string from the BitTorrent handshake.
    log: optional file-like object; each unidentified peer id is written
        to it once.
    Returns a (client_name, version_string) tuple; ('unknown', '') when
    no pattern matches.
    """
    client = 'unknown'
    version = ''
    for pat, name in matches:
        m = pat.match(peerid)
        if m:
            client = name
            d = m.groupdict()
            if 'version' in d:
                version = d['version']
                version = version.replace('-', '.')
                if version.find('.') >= 0:
                    version = ''.join(version.split('.'))

                # Expand each character into one dotted version component:
                # digits map to themselves, letters/./- continue the
                # base-64-style numbering used by Shadow's-style ids.
                version = list(version)
                for i, c in enumerate(version):
                    if '0' <= c <= '9':
                        version[i] = c
                    elif 'A' <= c <= 'Z':
                        version[i] = str(ord(c) - 55)
                    elif 'a' <= c <= 'z':
                        version[i] = str(ord(c) - 61)
                    elif c == '.':
                        version[i] = '62'
                    elif c == '-':
                        version[i] = '63'
                    else:
                        break
                version = '.'.join(version)
            elif 'bcver' in d:
                # BitComet/BitLord: raw bytes encode major and minor digits
                bcver = d['bcver']
                version += str(ord(bcver[0])) + '.'
                if len(bcver) > 1:
                    version += str(ord(bcver[1]) // 10)
                    version += str(ord(bcver[1]) % 10)
            elif 'rsver' in d:
                # Rufus: raw bytes encode major.minor.patch
                rsver = d['rsver']
                version += str(ord(rsver[0])) + '.'
                if len(rsver) > 1:
                    version += str(ord(rsver[1]) // 10) + '.'
                    version += str(ord(rsver[1]) % 10)
            if 'strver' in d:
                if d['strver'] is not None:
                    version += d['strver']
            if 'rc' in d:
                rc = 'RC ' + d['rc'][1:]
                if version:
                    version += ' '
                version += rc
            break
    if client == 'unknown':
        # identify Shareaza 2.0 - 2.1: last four bytes are an xor signature
        # over earlier bytes of the peer id
        if len(peerid) == 20 and chr(0) not in peerid[:15]:
            shareaza = True
            for i in range(16, 20):
                if ord(peerid[i]) != (ord(peerid[i - 16]) ^ ord(peerid[31 - i])):
                    shareaza = False
                    break
            if shareaza:
                client = "Shareaza"

    if log is not None and 'unknown' in client:
        if peerid not in unknown_clients:
            unknown_clients[peerid] = True
            log.write('%s\n'%peerid)
            log.write('------------------------------\n')
    return client, version
|
337
BitTorrent/Connecter.py
Executable file
337
BitTorrent/Connecter.py
Executable file
@ -0,0 +1,337 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Originally written by Bram Cohen, heavily modified by Uoti Urpala
|
||||
|
||||
# required for python 2.2
|
||||
from __future__ import generators
|
||||
|
||||
from binascii import b2a_hex
|
||||
from struct import pack, unpack
|
||||
|
||||
from BitTorrent.bitfield import Bitfield
|
||||
from BitTorrent.obsoletepythonsupport import *
|
||||
|
||||
def toint(s):
    """Interpret the bytes of *s* as a big-endian unsigned integer."""
    hex_digits = b2a_hex(s)
    return int(hex_digits, 16)
|
||||
|
||||
def tobinary(i):
    """Encode integer *i* as a 4-character big-endian string."""
    high = chr(i >> 24) + chr((i >> 16) & 0xFF)
    low = chr((i >> 8) & 0xFF) + chr(i & 0xFF)
    return high + low
|
||||
|
||||
# Peer wire protocol message ids (one byte each).
CHOKE = chr(0)
UNCHOKE = chr(1)
INTERESTED = chr(2)
NOT_INTERESTED = chr(3)
# index
HAVE = chr(4)
# index, bitfield
BITFIELD = chr(5)
# index, begin, length
REQUEST = chr(6)
# index, begin, piece
PIECE = chr(7)
# index, begin, length (identifies the REQUEST to cancel)
CANCEL = chr(8)

# 2-byte port message (DHT listen port)
PORT = chr(9)

# reserved flags
DHT = 1                  # bit in the last reserved handshake byte
FLAGS = '\0' * 7 + '\1'  # our reserved bytes: advertise DHT support
protocol_name = 'BitTorrent protocol'
|
||||
|
||||
|
||||
class Connection(object):
    """A single peer-wire connection.

    Wraps a raw transport object, performs the BitTorrent handshake and
    then frames, parses and dispatches peer messages.  Incoming bytes are
    fed to data_came_in(); _read_messages() is a generator state machine
    that yields how many bytes it needs next and reads each completed
    chunk from self._message.  Outgoing PIECE data is sent in
    rate-limited chunks via send_partial().
    """

    def __init__(self, encoder, connection, id, is_local):
        # encoder: connection manager (owns my_id, download_id, config, ...)
        # connection: transport with write()/close(); we become its handler
        # id: expected remote peer id, or None when not yet known
        # is_local: True when we initiated the connection
        self.encoder = encoder
        self.connection = connection
        self.connection.handler = self
        self.id = id
        self.ip = connection.ip
        self.locally_initiated = is_local
        self.complete = False        # True once the handshake has finished
        self.closed = False
        self.got_anything = False    # True after the first real message
        self.next_upload = None
        self.upload = None
        self.download = None
        self._buffer = []            # accumulated fragments of the next chunk
        self._buffer_len = 0
        self._reader = self._read_messages()
        self._next_len = self._reader.next()  # bytes the parser wants next
        self._partial_message = None # unsent remainder of a PIECE message
        self._outqueue = []          # messages queued behind a partial PIECE
        self.choke_sent = True
        self.uses_dht = False
        self.dht_port = None
        if self.locally_initiated:
            # start the handshake; our peer id follows immediately only when
            # we already know who we expect on the other end
            connection.write(chr(len(protocol_name)) + protocol_name +
                             FLAGS + self.encoder.download_id)
            if self.id is not None:
                connection.write(self.encoder.my_id)

    def close(self):
        """Close the transport (if still open) and detach from the encoder."""
        if not self.closed:
            self.connection.close()
            self._sever()

    def send_interested(self):
        self._send_message(INTERESTED)

    def send_not_interested(self):
        self._send_message(NOT_INTERESTED)

    def send_choke(self):
        # while a PIECE is partially sent the choke is deferred;
        # send_partial() queues it once the piece completes
        if self._partial_message is None:
            self._send_message(CHOKE)
            self.choke_sent = True
            self.upload.sent_choke()

    def send_unchoke(self):
        # deferred like send_choke() while a partial PIECE is pending
        if self._partial_message is None:
            self._send_message(UNCHOKE)
            self.choke_sent = False

    def send_port(self, port):
        """Advertise our DHT listen port."""
        self._send_message(PORT+pack('!H', port))

    def send_request(self, index, begin, length):
        self._send_message(REQUEST + tobinary(index) +
                           tobinary(begin) + tobinary(length))

    def send_cancel(self, index, begin, length):
        self._send_message(CANCEL + tobinary(index) +
                           tobinary(begin) + tobinary(length))

    def send_bitfield(self, bitfield):
        self._send_message(BITFIELD + bitfield)

    def send_have(self, index):
        self._send_message(HAVE + tobinary(index))

    def send_keepalive(self):
        # a zero-length message is the protocol keepalive
        self._send_message('')

    def send_partial(self, bytes):
        """Write up to *bytes* bytes of queued PIECE data.

        Called by the rate limiter; returns how many bytes were actually
        written (0 when closed or nothing to send).
        """
        if self.closed:
            return 0
        if self._partial_message is None:
            s = self.upload.get_upload_chunk()
            if s is None:
                return 0
            index, begin, piece = s
            # length prefix covers message id + index + begin + payload
            self._partial_message = ''.join((tobinary(len(piece) + 9), PIECE,
                                        tobinary(index), tobinary(begin), piece))
        if bytes < len(self._partial_message):
            # only part of the message fits this round; keep the rest
            self.upload.update_rate(bytes)
            self.connection.write(buffer(self._partial_message, 0, bytes))
            self._partial_message = buffer(self._partial_message, bytes)
            return bytes

        # whole message fits: flush it plus any deferred choke/unchoke and
        # other queued messages in one write
        queue = [str(self._partial_message)]
        self._partial_message = None
        if self.choke_sent != self.upload.choked:
            if self.upload.choked:
                self._outqueue.append(tobinary(1) + CHOKE)
                self.upload.sent_choke()
            else:
                self._outqueue.append(tobinary(1) + UNCHOKE)
            self.choke_sent = self.upload.choked
        queue.extend(self._outqueue)
        self._outqueue = []
        queue = ''.join(queue)
        self.upload.update_rate(len(queue))
        self.connection.write(queue)
        return len(queue)

    # yields the number of bytes it wants next, gets those in self._message
    def _read_messages(self):
        yield 1   # header length
        if ord(self._message) != len(protocol_name):
            return

        yield len(protocol_name)
        if self._message != protocol_name:
            return

        yield 8  # reserved
        # dht is on last reserved byte
        if ord(self._message[7]) & DHT:
            self.uses_dht = True

        yield 20 # download id
        if self.encoder.download_id is None: # incoming connection
            # modifies self.encoder if successful
            self.encoder.select_torrent(self, self._message)
            if self.encoder.download_id is None:
                return
        elif self._message != self.encoder.download_id:
            return
        if not self.locally_initiated:
            # respond with our half of the handshake now that the torrent
            # is known
            self.connection.write(chr(len(protocol_name)) + protocol_name +
                FLAGS + self.encoder.download_id + self.encoder.my_id)

        yield 20 # peer id
        if not self.id:
            self.id = self._message
            if self.id == self.encoder.my_id:
                # connected to ourselves
                return
            for v in self.encoder.connections.itervalues():
                if v is not self:
                    # drop duplicate peers (same id, or same IP when
                    # one_connection_per_ip is set)
                    if v.id == self.id:
                        return
                    if self.encoder.config['one_connection_per_ip'] and \
                           v.ip == self.ip:
                        return
            if self.locally_initiated:
                self.connection.write(self.encoder.my_id)
            else:
                self.encoder.everinc = True
        else:
            if self._message != self.id:
                return
        self.complete = True
        self.encoder.connection_completed(self)

        # main message loop: 4-byte length prefix, then the message body
        while True:
            yield 4   # message length
            l = toint(self._message)
            if l > self.encoder.config['max_message_length']:
                return
            if l > 0:
                yield l
                self._got_message(self._message)

    def _got_message(self, message):
        """Validate and dispatch one complete peer message."""
        t = message[0]
        if t == BITFIELD and self.got_anything:
            # BITFIELD is only legal as the very first message
            self.close()
            return
        self.got_anything = True
        if (t in [CHOKE, UNCHOKE, INTERESTED, NOT_INTERESTED] and
                len(message) != 1):
            self.close()
            return
        if t == CHOKE:
            self.download.got_choke()
        elif t == UNCHOKE:
            self.download.got_unchoke()
        elif t == INTERESTED:
            self.upload.got_interested()
        elif t == NOT_INTERESTED:
            self.upload.got_not_interested()
        elif t == HAVE:
            if len(message) != 5:
                self.close()
                return
            i = toint(message[1:])
            if i >= self.encoder.numpieces:
                self.close()
                return
            self.download.got_have(i)
        elif t == BITFIELD:
            try:
                b = Bitfield(self.encoder.numpieces, message[1:])
            except ValueError:
                self.close()
                return
            self.download.got_have_bitfield(b)
        elif t == REQUEST:
            if len(message) != 13:
                self.close()
                return
            i = toint(message[1:5])
            if i >= self.encoder.numpieces:
                self.close()
                return
            self.upload.got_request(i, toint(message[5:9]),
                                    toint(message[9:]))
        elif t == CANCEL:
            if len(message) != 13:
                self.close()
                return
            i = toint(message[1:5])
            if i >= self.encoder.numpieces:
                self.close()
                return
            self.upload.got_cancel(i, toint(message[5:9]),
                                   toint(message[9:]))
        elif t == PIECE:
            if len(message) <= 9:
                self.close()
                return
            i = toint(message[1:5])
            if i >= self.encoder.numpieces:
                self.close()
                return
            if self.download.got_piece(i, toint(message[5:9]), message[9:]):
                # piece completed and verified: announce it to all peers
                for co in self.encoder.complete_connections:
                    co.send_have(i)
        elif t == PORT:
            if len(message) != 3:
                self.close()
                return
            self.dht_port = unpack('!H', message[1:3])[0]
            self.encoder.got_port(self)
        else:
            # unknown message type
            self.close()

    def _sever(self):
        """Tear down all encoder bookkeeping after the transport is gone."""
        self.closed = True
        self._reader = None
        del self.encoder.connections[self.connection]
        self.encoder.replace_connection()
        if self.complete:
            del self.encoder.complete_connections[self]
            self.download.disconnected()
            self.encoder.choker.connection_lost(self)
            self.upload = self.download = None

    def _send_message(self, message):
        # prepend the 4-byte length; queue behind any partial PIECE so
        # message boundaries are preserved
        s = tobinary(len(message)) + message
        if self._partial_message is not None:
            self._outqueue.append(s)
        else:
            self.connection.write(s)

    def data_came_in(self, conn, s):
        """Feed raw bytes into the parser state machine."""
        while True:
            if self.closed:
                return
            # how many more bytes the parser needs for its current chunk
            i = self._next_len - self._buffer_len
            if i > len(s):
                self._buffer.append(s)
                self._buffer_len += len(s)
                return
            m = s[:i]
            if self._buffer_len > 0:
                self._buffer.append(m)
                m = ''.join(self._buffer)
                self._buffer = []
                self._buffer_len = 0
            s = s[i:]
            self._message = m
            try:
                self._next_len = self._reader.next()
            except StopIteration:
                # parser finished or rejected the stream
                self.close()
                return

    def connection_lost(self, conn):
        assert conn is self.connection
        self._sever()

    def connection_flushed(self, connection):
        # socket drained: requeue with the rate limiter if more upload data
        # is pending for this peer
        if self.complete and self.next_upload is None and (self._partial_message is not None
                or (self.upload and self.upload.buffer)):
            self.encoder.ratelimiter.queue(self, self.encoder.context.rlgroup)
|
288
BitTorrent/ConvertedMetainfo.py
Executable file
288
BitTorrent/ConvertedMetainfo.py
Executable file
@ -0,0 +1,288 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Uoti Urpala
|
||||
|
||||
# required for Python 2.2
|
||||
from __future__ import generators
|
||||
|
||||
import os
|
||||
import sys
|
||||
from sha import sha
|
||||
|
||||
from BitTorrent.obsoletepythonsupport import *
|
||||
|
||||
from BitTorrent.bencode import bencode
|
||||
from BitTorrent import btformats
|
||||
from BitTorrent import BTFailure, WARNING, ERROR
|
||||
|
||||
|
||||
# Characters Windows forbids in filenames; each is replaced with '-'.
WINDOWS_UNSUPPORTED_CHARS = r'"*/:<>?\|'
# 256-byte table for str.translate(): identity except for the chars above.
windows_translate = [chr(i) for i in range(256)]
for x in WINDOWS_UNSUPPORTED_CHARS:
    windows_translate[ord(x)] = '-'
windows_translate = ''.join(windows_translate)

# Mapping for unicode.translate(): surrogate code points and Unicode
# noncharacters are replaced with '-'.
noncharacter_translate = {}
for i in range(0xD800, 0xE000):      # surrogates
    noncharacter_translate[i] = ord('-')
for i in range(0xFDD0, 0xFDF0):      # noncharacter block
    noncharacter_translate[i] = ord('-')
for i in (0xFFFE, 0xFFFF):           # byte-order noncharacters
    noncharacter_translate[i] = ord('-')

# don't leak the loop variables as module attributes
del x, i
|
||||
|
||||
def set_filesystem_encoding(encoding, errorfunc):
    """Set the module-global `filesystem_encoding` used for filename conversion.

    encoding: '' to autodetect via sys.getfilesystemencoding(), otherwise
        an explicit codec name.  Falls back to 'ascii' on any problem.
    errorfunc(severity, message): called to report detection or validation
        problems.
    """
    global filesystem_encoding
    filesystem_encoding = 'ascii'   # conservative default until validated
    if encoding == '':
        try:
            sys.getfilesystemencoding
        except AttributeError:
            errorfunc(WARNING,
                      _("This seems to be an old Python version which "
                        "does not support detecting the filesystem "
                        "encoding. Assuming 'ascii'."))
            return
        encoding = sys.getfilesystemencoding()
        if encoding is None:
            errorfunc(WARNING,
                      _("Python failed to autodetect filesystem encoding. "
                        "Using 'ascii' instead."))
            return
    try:
        # cheap sanity check that the codec exists and can decode
        'a1'.decode(encoding)
    except Exception:
        # narrowed from a bare except: so KeyboardInterrupt/SystemExit
        # are no longer swallowed
        errorfunc(ERROR,
                  _("Filesystem encoding '%s' is not supported. "
                    "Using 'ascii' instead.") % encoding)
        return
    filesystem_encoding = encoding
|
||||
|
||||
|
||||
def generate_names(name, is_dir):
    """Yield alternative names for *name*: 'a.b' -> 'a.0.b', 'a.1.b', ...

    Directories (and extensionless files) get the counter appended after
    a trailing dot instead: 'a' -> 'a.0', 'a.1', ...
    """
    if is_dir:
        prefix, suffix = name + '.', ''
    else:
        dot = name.rfind('.')
        if dot == -1:
            dot = len(name)
        prefix, suffix = name[:dot] + '.', name[dot:]
    n = 0
    while True:
        yield '%s%d%s' % (prefix, n, suffix)
        n += 1
|
||||
|
||||
|
||||
class ConvertedMetainfo(object):
|
||||
|
||||
def __init__(self, metainfo):
|
||||
self.bad_torrent_wrongfield = False
|
||||
self.bad_torrent_unsolvable = False
|
||||
self.bad_torrent_noncharacter = False
|
||||
self.bad_conversion = False
|
||||
self.bad_windows = False
|
||||
self.bad_path = False
|
||||
self.reported_errors = False
|
||||
self.is_batch = False
|
||||
self.orig_files = None
|
||||
self.files_fs = None
|
||||
self.total_bytes = 0
|
||||
self.sizes = []
|
||||
self.comment = None
|
||||
|
||||
btformats.check_message(metainfo, check_paths=False)
|
||||
info = metainfo['info']
|
||||
if info.has_key('length'):
|
||||
self.total_bytes = info['length']
|
||||
self.sizes.append(self.total_bytes)
|
||||
else:
|
||||
self.is_batch = True
|
||||
r = []
|
||||
self.orig_files = []
|
||||
self.sizes = []
|
||||
i = 0
|
||||
for f in info['files']:
|
||||
l = f['length']
|
||||
self.total_bytes += l
|
||||
self.sizes.append(l)
|
||||
path = self._get_attr_utf8(f, 'path')
|
||||
for x in path:
|
||||
if not btformats.allowed_path_re.match(x):
|
||||
if l > 0:
|
||||
raise BTFailure(_("Bad file path component: ")+x)
|
||||
# BitComet makes bad .torrent files with empty
|
||||
# filename part
|
||||
self.bad_path = True
|
||||
break
|
||||
else:
|
||||
p = []
|
||||
for x in path:
|
||||
p.append((self._enforce_utf8(x), x))
|
||||
path = p
|
||||
self.orig_files.append('/'.join([x[0] for x in path]))
|
||||
k = []
|
||||
for u,o in path:
|
||||
tf2 = self._to_fs_2(u)
|
||||
k.append((tf2, u, o))
|
||||
r.append((k,i))
|
||||
i += 1
|
||||
# If two or more file/subdirectory names in the same directory
|
||||
# would map to the same name after encoding conversions + Windows
|
||||
# workarounds, change them. Files are changed as
|
||||
# 'a.b.c'->'a.b.0.c', 'a.b.1.c' etc, directories or files without
|
||||
# '.' as 'a'->'a.0', 'a.1' etc. If one of the multiple original
|
||||
# names was a "clean" conversion, that one is always unchanged
|
||||
# and the rest are adjusted.
|
||||
r.sort()
|
||||
self.files_fs = [None] * len(r)
|
||||
prev = [None]
|
||||
res = []
|
||||
stack = [{}]
|
||||
for x in r:
|
||||
j = 0
|
||||
x, i = x
|
||||
while x[j] == prev[j]:
|
||||
j += 1
|
||||
del res[j:]
|
||||
del stack[j+1:]
|
||||
name = x[j][0][1]
|
||||
if name in stack[-1]:
|
||||
for name in generate_names(x[j][1], j != len(x) - 1):
|
||||
name = self._to_fs(name)
|
||||
if name not in stack[-1]:
|
||||
break
|
||||
stack[-1][name] = None
|
||||
res.append(name)
|
||||
for j in range(j + 1, len(x)):
|
||||
name = x[j][0][1]
|
||||
stack.append({name: None})
|
||||
res.append(name)
|
||||
self.files_fs[i] = os.path.join(*res)
|
||||
prev = x
|
||||
|
||||
self.name = self._get_field_utf8(info, 'name')
|
||||
self.name_fs = self._to_fs(self.name)
|
||||
self.piece_length = info['piece length']
|
||||
self.is_trackerless = False
|
||||
if metainfo.has_key('announce'):
|
||||
self.announce = metainfo['announce']
|
||||
elif metainfo.has_key('nodes'):
|
||||
self.is_trackerless = True
|
||||
self.nodes = metainfo['nodes']
|
||||
|
||||
if metainfo.has_key('comment'):
|
||||
self.comment = metainfo['comment']
|
||||
|
||||
self.hashes = [info['pieces'][x:x+20] for x in xrange(0,
|
||||
len(info['pieces']), 20)]
|
||||
self.infohash = sha(bencode(info)).digest()
|
||||
|
||||
def show_encoding_errors(self, errorfunc):
|
||||
self.reported_errors = True
|
||||
if self.bad_torrent_unsolvable:
|
||||
errorfunc(ERROR,
|
||||
_("This .torrent file has been created with a broken "
|
||||
"tool and has incorrectly encoded filenames. Some or "
|
||||
"all of the filenames may appear different from what "
|
||||
"the creator of the .torrent file intended."))
|
||||
elif self.bad_torrent_noncharacter:
|
||||
errorfunc(ERROR,
|
||||
_("This .torrent file has been created with a broken "
|
||||
"tool and has bad character values that do not "
|
||||
"correspond to any real character. Some or all of the "
|
||||
"filenames may appear different from what the creator "
|
||||
"of the .torrent file intended."))
|
||||
elif self.bad_torrent_wrongfield:
|
||||
errorfunc(ERROR,
|
||||
_("This .torrent file has been created with a broken "
|
||||
"tool and has incorrectly encoded filenames. The "
|
||||
"names used may still be correct."))
|
||||
elif self.bad_conversion:
|
||||
errorfunc(WARNING,
|
||||
_('The character set used on the local filesystem ("%s") '
|
||||
'cannot represent all characters used in the '
|
||||
'filename(s) of this torrent. Filenames have been '
|
||||
'changed from the original.') % filesystem_encoding)
|
||||
elif self.bad_windows:
|
||||
errorfunc(WARNING,
|
||||
_("The Windows filesystem cannot handle some "
|
||||
"characters used in the filename(s) of this torrent."
|
||||
"Filenames have been changed from the original."))
|
||||
elif self.bad_path:
|
||||
errorfunc(WARNING,
|
||||
_("This .torrent file has been created with a broken "
|
||||
"tool and has at least 1 file with an invalid file "
|
||||
"or directory name. However since all such files "
|
||||
"were marked as having length 0 those files are "
|
||||
"just ignored."))
|
||||
|
||||
# At least BitComet seems to make bad .torrent files that have
|
||||
# fields in an arbitrary encoding but separate 'field.utf-8' attributes
|
||||
def _get_attr_utf8(self, d, attrib):
|
||||
v = d.get(attrib + '.utf-8')
|
||||
if v is not None:
|
||||
if v != d[attrib]:
|
||||
self.bad_torrent_wrongfield = True
|
||||
else:
|
||||
v = d[attrib]
|
||||
return v
|
||||
|
||||
def _enforce_utf8(self, s):
|
||||
try:
|
||||
s = s.decode('utf-8')
|
||||
except:
|
||||
self.bad_torrent_unsolvable = True
|
||||
s = s.decode('utf-8', 'replace')
|
||||
t = s.translate(noncharacter_translate)
|
||||
if t != s:
|
||||
self.bad_torrent_noncharacter = True
|
||||
return t.encode('utf-8')
|
||||
|
||||
def _get_field_utf8(self, d, attrib):
|
||||
r = self._get_attr_utf8(d, attrib)
|
||||
return self._enforce_utf8(r)
|
||||
|
||||
def _fix_windows(self, name, t=windows_translate):
|
||||
bad = False
|
||||
r = name.translate(t)
|
||||
# for some reason name cannot end with '.' or space
|
||||
if r[-1] in '. ':
|
||||
r = r + '-'
|
||||
if r != name:
|
||||
self.bad_windows = True
|
||||
bad = True
|
||||
return (r, bad)
|
||||
|
||||
def _to_fs(self, name):
    """Convert `name` for the local filesystem, discarding the 'bad' flag."""
    bad_flag, fs_name = self._to_fs_2(name)
    return fs_name
|
||||
|
||||
def _to_fs_2(self, name):
    """Convert UTF-8 `name` to the local filesystem encoding.

    Returns (bad, converted) where `bad` is True when the name had to be
    altered (Windows-invalid characters, or characters the filesystem
    encoding cannot represent).
    """
    bad = False
    if sys.platform.startswith('win'):
        name, bad = self._fix_windows(name)
    name = name.decode('utf-8')
    try:
        r = name.encode(filesystem_encoding)
    except UnicodeError:
        # Was a bare `except:`; only the encode above can fail here and it
        # raises UnicodeError subclasses for unrepresentable characters.
        self.bad_conversion = True
        bad = True
        r = name.encode(filesystem_encoding, 'replace')

    if sys.platform.startswith('win'):
        # encoding to mbcs with or without 'replace' will make the
        # name unsupported by windows again because it adds random
        # '?' characters which are invalid windows filesystem
        # character
        r, bad2 = self._fix_windows(r)
        # Accumulate instead of overwrite: the original `r, bad = ...`
        # could reset an earlier bad=True when the second pass was clean.
        bad = bad or bad2
    return (bad, r)
|
48
BitTorrent/CurrentRateMeasure.py
Executable file
48
BitTorrent/CurrentRateMeasure.py
Executable file
@ -0,0 +1,48 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen
|
||||
|
||||
from BitTorrent.platform import bttime
|
||||
|
||||
|
||||
class Measure(object):
    """Windowed transfer-rate meter.

    Tracks a running byte total and an average rate over (at most) the
    last `max_rate_period` seconds.  `fudge` backdates the window start so
    the very first samples do not divide by a tiny interval.
    """

    def __init__(self, max_rate_period, fudge=5):
        self.max_rate_period = max_rate_period
        self.ratesince = bttime() - fudge   # start of the averaging window
        self.last = self.ratesince          # time of the last sample
        self.rate = 0.0
        self.total = 0

    def update_rate(self, amount):
        """Fold `amount` new bytes into the running rate."""
        self.total += amount
        now = bttime()
        # re-weight the old average over the new window, then add the sample
        self.rate = (self.rate * (self.last - self.ratesince) +
                     amount) / (now - self.ratesince)
        self.last = now
        oldest_allowed = now - self.max_rate_period
        if self.ratesince < oldest_allowed:
            self.ratesince = oldest_allowed

    def get_rate(self):
        """Current rate, decayed to account for time since the last sample."""
        self.update_rate(0)
        return self.rate

    def get_rate_noupdate(self):
        """Last computed rate without touching the window."""
        return self.rate

    def time_until_rate(self, newrate):
        """Seconds of idling needed for the rate to decay to `newrate`."""
        if self.rate <= newrate:
            return 0
        elapsed = bttime() - self.ratesince
        return ((self.rate * elapsed) / newrate) - elapsed

    def get_total(self):
        """Total bytes ever recorded."""
        return self.total
|
33
BitTorrent/Desktop.py
Executable file
33
BitTorrent/Desktop.py
Executable file
@ -0,0 +1,33 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# written by Matt Chisholm
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from BitTorrent.platform import get_home_dir, get_shell_dir
|
||||
if os.name == 'nt':
|
||||
from win32com.shell import shellcon
|
||||
|
||||
# Pick a per-user "desktop" directory to use as the default location.
desktop = None

if os.name == 'nt':
    # Windows: ask the shell for the real desktop directory.
    desktop = get_shell_dir(shellcon.CSIDL_DESKTOPDIRECTORY)
else:
    homedir = get_home_dir()
    if homedir is None:  # was `== None`; identity test is the idiom for None
        desktop = '/tmp/'
    else:
        desktop = homedir
        if os.name in ('mac', 'posix'):
            # Prefer ~/Desktop when it exists and is read/writable.
            tmp_desktop = os.path.join(homedir, 'Desktop')
            if os.access(tmp_desktop, os.R_OK|os.W_OK):
                desktop = tmp_desktop + os.sep
|
363
BitTorrent/Downloader.py
Executable file
363
BitTorrent/Downloader.py
Executable file
@ -0,0 +1,363 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen, Uoti Urpala
|
||||
|
||||
from random import shuffle
|
||||
|
||||
from BitTorrent.platform import bttime
|
||||
from BitTorrent.CurrentRateMeasure import Measure
|
||||
from BitTorrent.bitfield import Bitfield
|
||||
|
||||
|
||||
class PerIPStats(object):
    """Per-IP bookkeeping shared by all connections from one address.

    Used by Downloader/BadDataGuard to decide when a peer sending data
    that fails hash checks should be kicked or banned.
    """

    def __init__(self):
        self.numgood = 0          # distinct pieces received good from this IP
        self.bad = {}             # piece index -> count of bad receipts
        self.numconnections = 0   # currently open connections from this IP
        self.lastdownload = None  # most recent SingleDownload from this IP
        self.peerid = None        # peer id seen on the last connection
|
||||
|
||||
|
||||
class BadDataGuard(object):
    """Tracks hash-check failures attributable to one peer connection.

    One guard is attached to each SingleDownload.  Outcomes are recorded
    in the PerIPStats shared by all connections from that IP, and the
    guard escalates from kicking the connection to banning the IP.
    """

    def __init__(self, download):
        self.download = download
        self.ip = download.connection.ip
        self.downloader = download.downloader
        # Shared per-IP stats; assumes Downloader.make_download() already
        # created the perip entry for this connection's IP -- TODO confirm
        # ordering against Downloader.make_download / SingleDownload.__init__.
        self.stats = self.downloader.perip[self.ip]
        # Last piece index credited as good; see good() below.
        self.lastindex = None

    def bad(self, index, bump = False):
        """Record that piece `index` included bad data from this peer.

        Kicks the offending connection (or a newer connection from the
        same IP), and bans the IP once at least 3 distinct pieces went bad
        and bad pieces outnumber numgood // 30.  With `bump`, re-bumps the
        piece in the picker when no ban occurred.
        """
        self.stats.bad.setdefault(index, 0)
        self.stats.bad[index] += 1
        if self.ip not in self.downloader.bad_peers:
            # remembered as suspicious (not yet banned: flag False)
            self.downloader.bad_peers[self.ip] = (False, self.stats)
        if self.download is not None:
            # kick at most once per guard; cleared afterwards
            self.downloader.kick(self.download)
            self.download = None
        elif len(self.stats.bad) > 1 and self.stats.numconnections == 1 and \
                 self.stats.lastdownload is not None:
            # kick new connection from same IP if previous one sent bad data,
            # mainly to give the algorithm time to find other bad pieces
            # in case the peer is sending a lot of bad data
            self.downloader.kick(self.stats.lastdownload)
        if len(self.stats.bad) >= 3 and len(self.stats.bad) > \
               self.stats.numgood // 30:
            self.downloader.ban(self.ip)
        elif bump:
            self.downloader.picker.bump(index)

    def good(self, index):
        """Record a good chunk of piece `index` from this peer."""
        # lastindex is a hack to only increase numgood by one for each good
        # piece, however many chunks came from the connection(s) from this IP
        if index != self.lastindex:
            self.stats.numgood += 1
            self.lastindex = index
|
||||
|
||||
|
||||
class SingleDownload(object):
    """Download-side state machine for one peer connection.

    Tracks choke/interest state, the pipeline of outstanding block
    requests (`active_requests` holds (index, begin, length) tuples),
    the peer's bitfield, and rate measurements.  Cooperates with the
    shared Downloader, PiecePicker and storage objects.
    """

    def __init__(self, downloader, connection):
        self.downloader = downloader
        self.connection = connection
        self.choked = True              # peer is choking us
        self.interested = False         # we told the peer we're interested
        self.active_requests = []       # outstanding (index, begin, length)
        self.measure = Measure(downloader.config['max_rate_period'])
        # rate at which the peer acquires pieces (from its HAVE messages);
        # NOTE: piece_size / 10000 relies on Python 2 integer division
        self.peermeasure = Measure(max(downloader.storage.piece_size / 10000,
                                       20))
        self.have = Bitfield(downloader.numpieces)  # pieces the peer claims
        self.last = 0                   # bttime() of the last piece received
        self.example_interest = None    # a piece index we're known to want
        self.backlog = 2                # current request-pipeline depth
        self.guard = BadDataGuard(self)

    def _backlog(self):
        """Recompute pipeline depth from the current download rate."""
        backlog = 2 + int(4 * self.measure.get_rate() /
                          self.downloader.chunksize)
        if backlog > 50:
            # NOTE(review): max(50, .075*backlog) never drops below 50 once
            # over the threshold -- damps growth for very fast peers
            backlog = max(50, int(.075 * backlog))
        self.backlog = backlog
        return backlog

    def disconnected(self):
        """Connection is gone: unregister and return requests to the pool."""
        self.downloader.lost_peer(self)
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.lost_have(i)
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        """Give up all outstanding requests and re-offer them to other peers."""
        if not self.active_requests:
            return
        if self.downloader.storage.endgame:
            # in endgame all requests are duplicated anyway
            self.active_requests = []
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.downloader.storage.request_lost(index, begin, length)
            if index not in lost:
                lost.append(index)
        self.active_requests = []
        # let unchoked peers pick up the freed pieces, in random order
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more(lost)
        # choked, uninterested peers may now have something to be
        # interested in again
        for d in self.downloader.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
                        d.interested = True
                        d.connection.send_interested()
                        break

    def got_choke(self):
        """Peer choked us: drop our outstanding requests."""
        if not self.choked:
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        """Peer unchoked us: start requesting if we are interested."""
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more()

    def got_piece(self, index, begin, piece):
        """Handle a received block.

        Returns True when the whole piece `index` is now complete and
        hash-checked, False otherwise (including unsolicited blocks,
        which are counted as discarded).
        """
        try:
            self.active_requests.remove((index, begin, len(piece)))
        except ValueError:
            # we never asked for this block (or already got it elsewhere)
            self.downloader.discarded_bytes += len(piece)
            return False
        if self.downloader.storage.endgame:
            self.downloader.all_requests.remove((index, begin, len(piece)))
        self.last = bttime()
        self.measure.update_rate(len(piece))
        self.downloader.measurefunc(len(piece))
        self.downloader.downmeasure.update_rate(len(piece))
        if not self.downloader.storage.piece_came_in(index, begin, piece,
                                                     self.guard):
            # piece not complete yet (or failed); re-queue its requests
            if self.downloader.storage.endgame:
                while self.downloader.storage.do_I_have_requests(index):
                    nb, nl = self.downloader.storage.new_request(index)
                    self.downloader.all_requests.append((index, nb, nl))
                for d in self.downloader.downloads:
                    d.fix_download_endgame()
                return False
            ds = [d for d in self.downloader.downloads if not d.choked]
            shuffle(ds)
            for d in ds:
                d._request_more([index])
            return False
        if self.downloader.storage.do_I_have(index):
            self.downloader.picker.complete(index)
        if self.downloader.storage.endgame:
            # cancel duplicate requests for this block on other peers
            for d in self.downloader.downloads:
                if d is not self and d.interested:
                    if d.choked:
                        d.fix_download_endgame()
                    else:
                        try:
                            d.active_requests.remove((index, begin, len(piece)))
                        except ValueError:
                            continue
                        d.connection.send_cancel(index, begin, len(piece))
                        d.fix_download_endgame()
        self._request_more()
        if self.downloader.picker.am_I_complete():
            # we are a seed now; drop connections to other seeds
            for d in [i for i in self.downloader.downloads if i.have.numfalse == 0]:
                d.connection.close()
        return self.downloader.storage.do_I_have(index)

    def _want(self, index):
        """Predicate for the picker: peer has the piece and we still need it."""
        return self.have[index] and self.downloader.storage.do_I_have_requests(index)

    def _request_more(self, indices = None):
        """Fill the request pipeline up to the computed backlog.

        With `indices`, only those pieces are considered; otherwise the
        picker chooses.  Must not be called while choked.
        """
        assert not self.choked
        if len(self.active_requests) >= self._backlog():
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
            return
        lost_interests = []
        while len(self.active_requests) < self.backlog:
            if indices is None:
                interest = self.downloader.picker.next(self._want, self.have.numfalse == 0)
            else:
                interest = None
                for i in indices:
                    if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connection.send_interested()
            self.example_interest = interest
            self.downloader.picker.requested(interest, self.have.numfalse == 0)
            # burst several requests for the same piece in one go
            while len(self.active_requests) < (self.backlog-2) * 5 + 2:
                begin, length = self.downloader.storage.new_request(interest)
                self.active_requests.append((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                if not self.downloader.storage.do_I_have_requests(interest):
                    # piece fully requested; others may lose interest in it
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connection.send_not_interested()
        if lost_interests:
            # re-evaluate interest of idle peers whose only known want
            # may have just been fully requested
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest):
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                interest = self.downloader.picker.next(d._want, d.have.numfalse == 0)
                if interest is None:
                    d.interested = False
                    d.connection.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.endgame:
            # endgame just started: replicate every outstanding request
            # to every capable peer
            self.downloader.all_requests = []
            for d in self.downloader.downloads:
                self.downloader.all_requests.extend(d.active_requests)
            for d in self.downloader.downloads:
                d.fix_download_endgame()

    def fix_download_endgame(self):
        """Endgame mode: request any still-missing blocks this peer has."""
        want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests]
        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connection.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connection.send_interested()
        if self.choked or len(self.active_requests) >= self._backlog():
            return
        shuffle(want)
        # keep only as many as fit in the remaining backlog
        del want[self.backlog - len(self.active_requests):]
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)

    def got_have(self, index):
        """Peer announced it acquired piece `index`."""
        if self.have[index]:
            return
        # feed peermeasure with the piece's byte size (last piece is short)
        if index == self.downloader.numpieces-1:
            self.peermeasure.update_rate(self.downloader.storage.total_length-
                (self.downloader.numpieces-1)*self.downloader.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.downloader.storage.piece_size)
        self.have[index] = True
        self.downloader.picker.got_have(index)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            # both sides are seeds; no point staying connected
            self.connection.close()
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
        elif self.downloader.storage.do_I_have_requests(index):
            if not self.choked:
                self._request_more([index])
            else:
                if not self.interested:
                    self.interested = True
                    self.connection.send_interested()

    def got_have_bitfield(self, have):
        """Peer sent its full bitfield (only valid right after handshake)."""
        if self.downloader.picker.am_I_complete() and have.numfalse == 0:
            # seed-to-seed connection; drop it
            self.connection.close()
            return
        self.have = have
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.got_have(i)
        if self.downloader.storage.endgame:
            for piece, begin, length in self.downloader.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connection.send_interested()
                    return
        for i in xrange(len(self.have)):
            if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                self.interested = True
                self.connection.send_interested()
                return

    def get_rate(self):
        """Current download rate from this peer (bytes/second)."""
        return self.measure.get_rate()

    def is_snubbed(self):
        """True when the peer has sent nothing for longer than snub_time."""
        return bttime() - self.last > self.downloader.snub_time
|
||||
|
||||
|
||||
class Downloader(object):
    """Download-side coordinator; one instance per torrent.

    Creates a SingleDownload per peer connection, keeps per-IP statistics
    used to detect peers that send data failing hash checks, and applies
    the configured retaliation policy (kick / ban).
    """

    def __init__(self, config, storage, picker, numpieces, downmeasure,
                 measurefunc, kickfunc, banfunc):
        self.config = config
        self.storage = storage
        self.picker = picker
        self.chunksize = config['download_slice_size']
        self.downmeasure = downmeasure
        self.numpieces = numpieces
        self.snub_time = config['snub_time']
        self.measurefunc = measurefunc
        self.kickfunc = kickfunc
        self.banfunc = banfunc
        self.downloads = []         # live SingleDownload instances
        self.perip = {}             # ip -> PerIPStats
        self.bad_peers = {}         # ip -> (banned?, PerIPStats)
        self.discarded_bytes = 0    # unsolicited/duplicate bytes received

    def make_download(self, connection):
        """Create and register a SingleDownload for a new connection."""
        ip = connection.ip
        stats = self.perip.get(ip)
        if stats is None:
            stats = PerIPStats()
            self.perip[ip] = stats
        stats.numconnections += 1
        d = SingleDownload(self, connection)
        stats.lastdownload = d
        stats.peerid = connection.id
        self.downloads.append(d)
        return d

    def lost_peer(self, download):
        """Forget a disconnected peer and update its per-IP stats."""
        self.downloads.remove(download)
        stats = self.perip[download.connection.ip]
        stats.numconnections -= 1
        if stats.lastdownload == download:
            stats.lastdownload = None

    def kick(self, download):
        """Disconnect a peer that sent data failing a hash check."""
        if not self.config['retaliate_to_garbled_data']:
            return
        # (removed unused locals `ip` and `peerid` computed here before)
        # kickfunc will schedule connection.close() to be executed later; we
        # might now be inside RawServer event loop with events from that
        # connection already queued, and trying to handle them after doing
        # close() now could cause problems.
        self.kickfunc(download.connection)

    def ban(self, ip):
        """Refuse all further connections from `ip` (if retaliation is on)."""
        if not self.config['retaliate_to_garbled_data']:
            return
        self.banfunc(ip)
        self.bad_peers[ip] = (True, self.perip[ip])
|
139
BitTorrent/DownloaderFeedback.py
Executable file
139
BitTorrent/DownloaderFeedback.py
Executable file
@ -0,0 +1,139 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen, Uoti Urpala
|
||||
|
||||
from __future__ import division
|
||||
|
||||
|
||||
class DownloaderFeedback(object):
    """Aggregates per-torrent statistics for UI / status reporting.

    Pulls numbers from the choker, downloader, picker, storage and the
    tracker rerequester, and packages them into the dict returned by
    get_statistics().  Uses true division (``from __future__ import
    division`` at the top of this module).
    """

    def __init__(self, choker, upfunc, upfunc2, downfunc, uptotal, downtotal,
                 remainingfunc, leftfunc, file_length, finflag, downloader,
                 files, ever_got_incoming, rerequester):
        self.downloader = downloader
        self.picker = downloader.picker
        self.storage = downloader.storage
        self.choker = choker
        self.upfunc = upfunc                # callables returning live numbers
        self.upfunc2 = upfunc2
        self.downfunc = downfunc
        self.uptotal = uptotal
        self.downtotal = downtotal
        self.remainingfunc = remainingfunc  # estimated seconds remaining
        self.leftfunc = leftfunc            # bytes still to download
        self.file_length = file_length
        self.finflag = finflag              # threading.Event-like "finished"
        self.files = files
        self.ever_got_incoming = ever_got_incoming
        self.rerequester = rerequester
        self.lastids = []                   # rotation anchor for spew list

    def _rotate(self):
        """Rotate the connection list so it starts where last spew started."""
        cs = self.choker.connections
        for peerid in self.lastids:
            for i in xrange(len(cs)):
                if cs[i].id == peerid:
                    return cs[i:] + cs[:i]
        return cs

    def collect_spew(self):
        """Build the per-peer detail list ('spew') for the UI."""
        l = [ ]
        cs = self._rotate()
        self.lastids = [c.id for c in cs]
        for c in cs:
            rec = {}
            rec['id'] = c.id
            rec["ip"] = c.ip
            # the choker keeps its optimistic unchoke at the head of the list
            rec["is_optimistic_unchoke"] = (c is self.choker.connections[0])
            if c.locally_initiated:
                rec["initiation"] = "L"
            else:
                rec["initiation"] = "R"
            u = c.upload
            rec["upload"] = (u.measure.get_total(), int(u.measure.get_rate()),
                             u.interested, u.choked)

            d = c.download
            rec["download"] = (d.measure.get_total(),int(d.measure.get_rate()),
                               d.interested, d.choked, d.is_snubbed())
            # fraction of the torrent the peer has (true division)
            rec['completed'] = 1 - d.have.numfalse / len(d.have)
            rec['speed'] = d.connection.download.peermeasure.get_rate()
            l.append(rec)
        return l

    def get_statistics(self, spewflag=False, fileflag=False):
        """Return a dict of torrent-wide statistics.

        With `spewflag`, includes per-peer details and bad-peer info;
        with `fileflag`, per-file remaining/allocated lists.
        """
        status = {}
        numSeeds = 0
        numPeers = 0
        for d in self.downloader.downloads:
            if d.have.numfalse == 0:
                numSeeds += 1
            else:
                numPeers += 1
        status['numSeeds'] = numSeeds
        status['numPeers'] = numPeers
        status['trackerSeeds'] = self.rerequester.tracker_num_seeds
        status['trackerPeers'] = self.rerequester.tracker_num_peers
        status['upRate'] = self.upfunc()
        status['upRate2'] = self.upfunc2()
        status['upTotal'] = self.uptotal()
        status['ever_got_incoming'] = self.ever_got_incoming()
        # Estimate how many full distributed copies of the torrent exist.
        # NOTE(review): assumes picker.crosscount[i] counts pieces with
        # exactly i known copies -- confirm against PiecePicker.
        missingPieces = 0
        numCopyList = []
        numCopies = 0
        for i in self.picker.crosscount:
            missingPieces += i
            if missingPieces == 0:
                numCopies += 1
            else:
                fraction = 1 - missingPieces / self.picker.numpieces
                numCopyList.append(fraction)
                if fraction == 0 or len(numCopyList) >= 3:
                    break
        numCopies -= numSeeds
        if self.picker.numgot == self.picker.numpieces:
            numCopies -= 1
        status['numCopies'] = numCopies
        status['numCopyList'] = numCopyList
        status['discarded'] = self.downloader.discarded_bytes
        status['storage_numcomplete'] = self.storage.stat_numfound + \
                                        self.storage.stat_numdownloaded
        status['storage_dirty'] = len(self.storage.stat_dirty)
        status['storage_active'] = len(self.storage.stat_active)
        status['storage_new'] = len(self.storage.stat_new)
        status['storage_numflunked'] = self.storage.stat_numflunked

        if spewflag:
            status['spew'] = self.collect_spew()
            status['bad_peers'] = self.downloader.bad_peers
        if fileflag:
            undl = self.storage.storage.undownloaded
            unal = self.storage.storage.unallocated
            status['files_left'] = [undl[fname] for fname in self.files]
            status['files_allocated'] = [not unal[fn] for fn in self.files]
        if self.finflag.isSet():
            # finished: download rate is zero by definition
            status['downRate'] = 0
            status['downTotal'] = self.downtotal()
            status['fractionDone'] = 1
            return status
        timeEst = self.remainingfunc()
        status['timeEst'] = timeEst

        if self.file_length > 0:
            fractionDone = 1 - self.leftfunc() / self.file_length
        else:
            fractionDone = 1
        status.update({
            "fractionDone" : fractionDone,
            "downRate" : self.downfunc(),
            "downTotal" : self.downtotal()
            })
        return status
|
185
BitTorrent/Encoder.py
Executable file
185
BitTorrent/Encoder.py
Executable file
@ -0,0 +1,185 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen
|
||||
|
||||
from socket import error as socketerror
|
||||
|
||||
from BitTorrent import BTFailure
|
||||
from BitTorrent.RawServer_magic import Handler
|
||||
from BitTorrent.Connecter import Connection
|
||||
from BitTorrent.platform import is_frozen_exe
|
||||
from BitTorrent.ClientIdentifier import identify_client
|
||||
|
||||
# header, reserved, download id, my id, [length, message]
|
||||
|
||||
class InitialConnectionHandler(Handler):
    """RawServer handler used while an outgoing peer connection is set up."""

    def __init__(self, parent, id):
        self.parent = parent
        self.id = id

    def connection_started(self, s):
        # Wrap the raw transport in a peer Connection marked as locally
        # initiated, and register it with the owning Encoder.
        peer_con = Connection(self.parent, s, self.id, True)
        self.parent.connections[s] = peer_con
|
||||
|
||||
class Encoder(object):
    """Per-torrent registry of peer connections.

    Owns the dictionaries of in-progress and completed Connection objects,
    initiates outgoing connections (queueing spares once max_initiate is
    reached), sends periodic keepalives, and applies IP bans.
    """

    def __init__(self, make_upload, downloader, choker, numpieces, ratelimiter,
                 raw_server, config, my_id, schedulefunc, download_id, context, addcontactfunc, reported_port):
        self.make_upload = make_upload
        self.downloader = downloader
        self.choker = choker
        self.numpieces = numpieces
        self.ratelimiter = ratelimiter
        self.raw_server = raw_server
        self.my_id = my_id
        self.config = config
        self.schedulefunc = schedulefunc
        self.download_id = download_id
        self.context = context
        self.addcontact = addcontactfunc
        self.reported_port = reported_port
        # presumably set True elsewhere when an incoming connection arrives
        # (never assigned True in this module) -- see ever_got_incoming()
        self.everinc = False
        self.connections = {}           # transport -> Connection (all)
        self.complete_connections = {}  # Connection -> 1 (dict used as a set)
        self.spares = []                # (ip, port) pairs to try later
        self.banned = {}                # ip -> None (dict used as a set)
        schedulefunc(self.send_keepalives, config['keepalive_interval'])

    def send_keepalives(self):
        """Send a keepalive on every completed connection; reschedules itself."""
        self.schedulefunc(self.send_keepalives,
                          self.config['keepalive_interval'])
        for c in self.complete_connections:
            c.send_keepalive()

    def start_connection(self, dns, id):
        """Open an outgoing connection to dns = (ip, port) with peer id `id`.

        Silently ignored for banned IPs, ourselves, peer ids we are
        already connected to, and duplicate IPs when one_connection_per_ip
        is set.  Once max_initiate connections exist the address is kept
        as a spare instead.
        """
        if dns[0] in self.banned:
            return
        if id == self.my_id:
            return
        for v in self.connections.values():
            if id and v.id == id:
                return
            if self.config['one_connection_per_ip'] and v.ip == dns[0]:
                return
        if len(self.connections) >= self.config['max_initiate']:
            if len(self.spares) < self.config['max_initiate'] and \
                   dns not in self.spares:
                self.spares.append(dns)
            return
        self.raw_server.async_start_connection(dns, InitialConnectionHandler(self, id), self.context)

    def connection_completed(self, c):
        """Handshake done: attach upload/download state and notify the choker."""
        self.complete_connections[c] = 1
        c.upload = self.make_upload(c)
        c.download = self.downloader.make_download(c)
        self.choker.connection_made(c)
        if c.uses_dht:
            c.send_port(self.reported_port)

    def got_port(self, c):
        """Peer announced its DHT port; forward the contact when DHT is on."""
        # `is not None` (was `!= None`): identity test is the None idiom
        if self.addcontact and c.uses_dht and c.dht_port is not None:
            self.addcontact(c.connection.ip, c.dht_port)

    def ever_got_incoming(self):
        return self.everinc

    def how_many_connections(self):
        return len(self.complete_connections)

    def replace_connection(self):
        """Top up the connection count from the spare-address queue."""
        while len(self.connections) < self.config['max_initiate'] and \
              self.spares:
            self.start_connection(self.spares.pop(), None)

    def close_connections(self):
        """Close every registered connection (idempotent per connection)."""
        for c in self.connections.itervalues():
            if not c.closed:
                c.connection.close()
                c.closed = True

    def singleport_connection(self, listener, con):
        """Adopt an incoming connection routed here by SingleportListener."""
        if con.ip in self.banned:
            return
        m = self.config['max_allow_in']
        if m and len(self.connections) >= m:
            return
        self.connections[con.connection] = con
        del listener.connections[con.connection]
        con.encoder = self
        con.connection.context = self.context

    def ban(self, ip):
        """Refuse any further connections involving `ip`."""
        self.banned[ip] = None
|
||||
|
||||
|
||||
class SingleportListener(Handler):
    """Dispatches incoming peer connections on a shared listening port.

    Several torrents can share one listening socket; once the handshake
    identifies the torrent (by infohash), the connection is handed to that
    torrent's Encoder via select_torrent() / singleport_connection().
    """

    def __init__(self, rawserver):
        self.rawserver = rawserver
        self.port = 0            # port currently handed out via get_port()
        self.ports = {}          # port -> [serversocket, refcount]
        self.torrents = {}       # infohash -> Encoder
        self.connections = {}    # transport -> Connection awaiting handshake
        self.download_id = None

    def _check_close(self, port):
        """Close the server socket for `port` once unused and superseded."""
        if not port or self.port == port or self.ports[port][1] > 0:
            return
        serversocket = self.ports[port][0]
        self.rawserver.stop_listening(serversocket)
        serversocket.close()
        del self.ports[port]

    def open_port(self, port, config):
        """Start listening on `port`; closes the old port when unreferenced."""
        if port in self.ports:
            self.port = port
            return
        serversocket = self.rawserver.create_serversocket(
            port, config['bind'], reuse=True, tos=config['peer_socket_tos'])
        self.rawserver.start_listening(serversocket, self)
        oldport = self.port
        self.port = port
        self.ports[port] = [serversocket, 0]
        self._check_close(oldport)

    def get_port(self):
        """Return the current port, taking a reference on it."""
        if self.port:
            self.ports[self.port][1] += 1
        return self.port

    def release_port(self, port):
        """Drop one reference to `port`; may close its socket."""
        self.ports[port][1] -= 1
        self._check_close(port)

    def close_sockets(self):
        """Stop listening on every open port."""
        for serversocket, _ in self.ports.itervalues():
            self.rawserver.stop_listening(serversocket)
            serversocket.close()

    def add_torrent(self, infohash, encoder):
        """Register a torrent; refuses a second instance of one infohash."""
        if infohash in self.torrents:
            raise BTFailure(_("Can't start two separate instances of the same "
                              "torrent"))
        self.torrents[infohash] = encoder

    def remove_torrent(self, infohash):
        del self.torrents[infohash]

    def select_torrent(self, conn, infohash):
        """Hand a handshaken connection to the matching torrent, if any."""
        if infohash in self.torrents:
            self.torrents[infohash].singleport_connection(self, conn)

    def connection_made(self, connection):
        # Incoming connection: hold it here (id=None, not locally
        # initiated) until the handshake reveals its torrent.
        con = Connection(self, connection, None, False)
        self.connections[connection] = con

    def replace_connection(self):
        # Present for Encoder interface compatibility; a listener has no
        # spare-connection queue to refill.
        pass
|
773
BitTorrent/GUI.py
Executable file
773
BitTorrent/GUI.py
Executable file
@ -0,0 +1,773 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# written by Matt Chisholm
|
||||
|
||||
from __future__ import division
|
||||
|
||||
import gtk
|
||||
import pango
|
||||
import gobject
|
||||
import os
|
||||
import threading
|
||||
|
||||
assert gtk.gtk_version >= (2, 2), "GTK 2.2 or newer required"
|
||||
assert gtk.pygtk_version >= (2, 2), "PyGTK 2.2 or newer required"
|
||||
|
||||
from BitTorrent import app_name, FAQ_URL, languages, language_names
|
||||
from BitTorrent.platform import image_root, read_language_file, write_language_file
|
||||
|
||||
def lock_wrap(function, *args):
    """Run function(*args) while holding the GTK/GDK global lock.

    Uses try/finally so the lock is always released; previously an
    exception in `function` left gtk.threads_leave() uncalled, which
    would deadlock the GUI.
    """
    gtk.threads_enter()
    try:
        function(*args)
    finally:
        gtk.threads_leave()
|
||||
|
||||
def gtk_wrap(function, *args):
    """Arrange for function(*args) to run on the GTK main-loop thread,
    holding the GDK lock (via lock_wrap) while it executes."""
    gobject.idle_add(lock_wrap, function, *args)
|
||||
|
||||
# Layout constants shared by the GTK widgets in this module.
SPACING = 8
WINDOW_TITLE_LENGTH = 128 # do we need this?
WINDOW_WIDTH = 600

# get screen size from GTK
d = gtk.gdk.display_get_default()
s = d.get_default_screen()
MAX_WINDOW_HEIGHT = s.get_height()
MAX_WINDOW_WIDTH = s.get_width()
if os.name == 'nt':
    MAX_WINDOW_HEIGHT -= 32 # leave room for start bar (exact)
    MAX_WINDOW_HEIGHT -= 32 # and window decorations (depends on windows theme)
else:
    MAX_WINDOW_HEIGHT -= 32 # leave room for window decorations (could be any size)


MIN_MULTI_PANE_HEIGHT = 107

# indices (third element) used in the drag-and-drop target tuples below
BT_TARGET_TYPE = 0
EXTERNAL_FILE_TYPE = 1
EXTERNAL_STRING_TYPE = 2

# (mime/atom name, gtk target flags, application type id)
BT_TARGET = ("application/x-bittorrent" , gtk.TARGET_SAME_APP, BT_TARGET_TYPE )
EXTERNAL_FILE = ("text/uri-list" , 0 , EXTERNAL_FILE_TYPE )

#gtk(gdk actually) is totally unable to receive text drags
#of any sort in windows because they're too lazy to use OLE.
#this list is all the atoms I could possibly find so that
#url dnd works on linux from any browser.
EXTERNAL_TEXTPLAIN = ("text/plain" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_TEXT = ("TEXT" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_COMPOUND_TEXT = ("COMPOUND_TEXT" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_MOZILLA = ("text/x-moz-url" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_NETSCAPE = ("_NETSCAPE_URL" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_HTML = ("text/html" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_UNICODE = ("text/unicode" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_UTF8 = ("text/plain;charset=utf-8" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_UTF8_STRING = ("UTF8_STRING" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_STRING = ("STRING" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_OLE2_DND = ("OLE2_DND" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_RTF = ("Rich Text Format" , 0 , EXTERNAL_STRING_TYPE)
#there should also be text/plain;charset={current charset}

# drop targets accepted from other applications (file managers, browsers)
TARGET_EXTERNAL = [EXTERNAL_FILE,
                   EXTERNAL_TEXTPLAIN,
                   EXTERNAL_TEXT,
                   EXTERNAL_COMPOUND_TEXT,
                   EXTERNAL_MOZILLA,
                   EXTERNAL_NETSCAPE,
                   EXTERNAL_HTML,
                   EXTERNAL_UNICODE,
                   EXTERNAL_UTF8,
                   EXTERNAL_UTF8_STRING,
                   EXTERNAL_STRING,
                   EXTERNAL_OLE2_DND,
                   EXTERNAL_RTF]

# everything external plus same-application torrent drags
TARGET_ALL = [BT_TARGET,
              EXTERNAL_FILE,
              EXTERNAL_TEXTPLAIN,
              EXTERNAL_TEXT,
              EXTERNAL_COMPOUND_TEXT,
              EXTERNAL_MOZILLA,
              EXTERNAL_NETSCAPE,
              EXTERNAL_HTML,
              EXTERNAL_UNICODE,
              EXTERNAL_UTF8,
              EXTERNAL_UTF8_STRING,
              EXTERNAL_STRING,
              EXTERNAL_OLE2_DND,
              EXTERNAL_RTF]

# a slightly hackish but very reliable way to get OS scrollbar width
sw = gtk.ScrolledWindow()
SCROLLBAR_WIDTH = sw.size_request()[0] - 48
del sw
|
||||
|
||||
def align(obj,x,y):
    """Align obj at fractional position (x, y) within its allocation.

    Labels get set_alignment() applied directly and are returned as-is;
    any other widget is wrapped in (and returned as) a gtk.Alignment."""
    # NOTE(review): exact-type check -- gtk.Label subclasses (e.g.
    # FancyLabel below) take the Alignment-wrapper path instead; confirm
    # that is intended before changing this to isinstance().
    if type(obj) is gtk.Label:
        obj.set_alignment(x,y)
        return obj
    else:
        a = gtk.Alignment(x,y,0,0)
        a.add(obj)
        return a
|
||||
|
||||
def halign(obj, amt):
    """Align obj horizontally at fraction amt (0=left, 1=right), centered vertically."""
    return align(obj,amt,0.5)
|
||||
|
||||
def lalign(obj):
    """Left-align obj (horizontal fraction 0)."""
    return halign(obj,0)
|
||||
|
||||
def ralign(obj):
    """Right-align obj (horizontal fraction 1)."""
    return halign(obj,1)
|
||||
|
||||
def valign(obj, amt):
    """Align obj vertically at fraction amt (0=top, 1=bottom), centered horizontally."""
    return align(obj,0.5,amt)
|
||||
|
||||
def malign(obj):
    """Center obj vertically (vertical fraction 0.5)."""
    return valign(obj, 0.5)
|
||||
|
||||
factory = gtk.IconFactory()

# these don't seem to be documented anywhere:
# ICON_SIZE_BUTTON = 20x20
# ICON_SIZE_LARGE_TOOLBAR = 24x24

# register each status image from image_root under a 'bt-<name>' stock id
for n in 'broken finished info pause paused play queued running remove status-running status-natted status-stopped'.split():
    fn = os.path.join(image_root, ("%s.png"%n))
    pixbuf = gtk.gdk.pixbuf_new_from_file(fn)
    # renamed from `set`, which shadowed the builtin of the same name
    iconset = gtk.IconSet(pixbuf)
    factory.add('bt-%s'%n, iconset)
factory.add_default()
|
||||
|
||||
def load_large_toolbar_image(image, stockname):
    """Load a large-toolbar-sized icon identified by stock id into `image`."""
    # This is a hack to work around a bug in GTK 2.4 that causes
    # gtk.ICON_SIZE_LARGE_TOOLBAR icons to be drawn at 18x18 instead
    # of 24x24 under GTK 2.4 & win32
    if os.name == 'nt' and gtk.gtk_version < (2, 6):
        # stock ids are 'bt-<name>'; strip the 'bt-' prefix to get the file
        image.set_from_file(os.path.join(image_root, stockname[3:]+'.png'))
    else:
        image.set_from_stock(stockname, gtk.ICON_SIZE_LARGE_TOOLBAR)
|
||||
|
||||
|
||||
def get_logo(size=32):
    """Return a gtk.Image holding the BitTorrent logo at the given pixel size."""
    logo_path = os.path.join(image_root, 'logo', 'bittorrent_%d.png' % size)
    image = gtk.Image()
    image.set_from_file(logo_path)
    return image
|
||||
|
||||
class Size(long):
    """long subclass whose str() renders a byte count in human-readable
    units (B, KB, MB, ...)."""
    # unit prefixes, one per power of `radix`
    size_labels = ['','K','M','G','T','P','E','Z','Y']
    radix = 2**10

    def __new__(cls, value, precision=None):
        # precision is accepted here only so the signature matches
        # __init__; the long value itself carries no precision
        self = long.__new__(cls, value)
        return self

    def __init__(self, value, precision=0):
        long.__init__(self, value)
        # `precision` is a magnitude threshold, not a digit count: the
        # display switches to one decimal place once the scaled
        # precision drops below 1 (see __str__)
        self.precision = precision

    def __str__(self, precision=None):
        if precision is None:
            precision = self.precision
        value = self
        # scale down until value fits under one radix step; this is true
        # division (the module does `from __future__ import division`)
        for unitname in self.size_labels:
            if value < self.radix and precision < self.radix:
                break
            value /= self.radix
            precision /= self.radix
        if unitname and value < 10 and precision < 1:
            return '%.1f %sB' % (value, unitname)
        else:
            return '%.0f %sB' % (value, unitname)
|
||||
|
||||
|
||||
class Rate(Size):
    """displays rate in human-readable format (a Size with "/s" appended)"""

    def __init__(self, value, precision=2**10):
        Size.__init__(self, value, precision)

    def __str__(self, precision=None):
        # Bug fix: the caller-supplied precision used to be discarded --
        # Size.__str__ was always invoked with precision=None, which made
        # it fall back to self.precision regardless of the argument.
        # Forward it instead; behaviour for precision=None is unchanged.
        return '%s/s' % Size.__str__(self, precision)
|
||||
|
||||
|
||||
class Duration(float):
    """displays duration in human-readable format"""

    def __str__(self):
        # more than a year is displayed as unknown
        if self > 365 * 24 * 60 * 60:
            return '?'
        elif self >= 172800:
            return _("%d days") % (self//86400) # 2 days or longer
        elif self >= 86400:
            return _("1 day %d hours") % ((self-86400)//3600) # 1-2 days
        elif self >= 3600:
            return _("%d:%02d hours") % (self//3600, (self%3600)//60) # 1 h - 1 day
        elif self >= 60:
            return _("%d:%02d minutes") % (self//60, self%60) # 1 minute to 1 hour
        elif self >= 0:
            return _("%d seconds") % int(self)
        else:
            # negative durations are clamped to zero
            return _("0 seconds")
|
||||
|
||||
|
||||
class FancyLabel(gtk.Label):
    """gtk.Label bound to a printf-style format string; set_value()
    re-renders the label text with new values."""

    def __init__(self, label_string, *values):
        self.label_string = label_string
        gtk.Label.__init__(self, label_string%values)

    def set_value(self, *values):
        self.set_text(self.label_string%values)
|
||||
|
||||
|
||||
class IconButton(gtk.Button):
    """Button showing an icon (from a stock id or an image file) beside a
    text label; the pair is centered inside the button."""

    def __init__(self, label, iconpath=None, stock=None):
        gtk.Button.__init__(self)

        self.hbox = gtk.HBox(spacing=5)

        self.icon = gtk.Image()
        # stock takes precedence when both stock and iconpath are given
        if stock is not None:
            self.icon.set_from_stock(stock, gtk.ICON_SIZE_BUTTON)
        elif iconpath is not None:
            self.icon.set_from_file(iconpath)
        else:
            raise TypeError, 'IconButton needs iconpath or stock'
        self.hbox.pack_start(self.icon)

        self.label = gtk.Label(label)
        self.hbox.pack_start(self.label)

        self.add(halign(self.hbox, 0.5))
|
||||
|
||||
|
||||
class LanguageChooser(gtk.Frame):
    """Frame containing a combo box for picking the UI language; every
    change is persisted immediately via write_language_file()."""

    def __init__(self):
        gtk.Frame.__init__(self, "Translate %s into:" % app_name)
        self.set_border_width(SPACING)

        # two string columns: display name, language code ('' = system default)
        model = gtk.ListStore(*[gobject.TYPE_STRING] * 2)
        default = model.append(("System default", ''))

        # preselect the language stored on disk, if any
        lang = read_language_file()
        for l in languages:
            it = model.append((language_names[l].encode('utf8'), l))
            if l == lang:
                default = it

        self.combo = gtk.ComboBox(model)
        cell = gtk.CellRendererText()
        self.combo.pack_start(cell, True)
        # render column 0 (the display name)
        self.combo.add_attribute(cell, 'text', 0)

        if default is not None:
            self.combo.set_active_iter(default)

        self.combo.connect('changed', self.changed)
        box = gtk.VBox(spacing=SPACING)
        box.set_border_width(SPACING)
        box.pack_start(self.combo, expand=False, fill=False)
        l = gtk.Label("You must restart %s for the\nlanguage "
                      "setting to take effect." % app_name)
        l.set_alignment(0,1)
        l.set_line_wrap(True)
        box.pack_start(l, expand=False, fill=False)
        self.add(box)

    def changed(self, *a):
        """Persist the newly selected language code (column 1) to disk."""
        it = self.combo.get_active_iter()
        model = self.combo.get_model()
        code = model.get(it, 1)[0]
        write_language_file(code)
|
||||
|
||||
|
||||
class Window(gtk.Window):
    """gtk.Window that installs the BitTorrent window icon at 16 and 32 px."""

    def __init__(self, *args):
        # apply() is deprecated; plain argument unpacking is equivalent
        gtk.Window.__init__(self, *args)
        iconname = os.path.join(image_root,'bittorrent.ico')
        icon16 = gtk.gdk.pixbuf_new_from_file_at_size(iconname, 16, 16)
        icon32 = gtk.gdk.pixbuf_new_from_file_at_size(iconname, 32, 32)
        self.set_icon_list(icon16, icon32)
|
||||
|
||||
|
||||
class HelpWindow(Window):
    """Help window showing the FAQ URL (with a Go button) and the
    command-line help text in a scrolled area.

    When `main` is None the window runs its own gtk.main() loop and quits
    it on destroy; otherwise it notifies main.window_closed('help')."""

    def __init__(self, main, helptext):
        Window.__init__(self)
        self.set_title(_("%s Help")%app_name)
        self.main = main
        self.set_border_width(SPACING)

        self.vbox = gtk.VBox(spacing=SPACING)

        # FAQ row: label, read-only URL entry, Go button
        self.faq_box = gtk.HBox(spacing=SPACING)
        self.faq_box.pack_start(gtk.Label(_("Frequently Asked Questions:")), expand=False, fill=False)
        self.faq_url = gtk.Entry()
        self.faq_url.set_text(FAQ_URL)
        self.faq_url.set_editable(False)
        self.faq_box.pack_start(self.faq_url, expand=True, fill=True)
        self.faq_button = gtk.Button(_("Go"))
        # NOTE(review): this dereferences self.main, which may be None in
        # the standalone (gtk.main) mode below -- confirm Go is unreachable
        # or guarded in that mode.
        self.faq_button.connect('clicked', lambda w: self.main.visit_url(FAQ_URL) )
        self.faq_box.pack_start(self.faq_button, expand=False, fill=False)
        self.vbox.pack_start(self.faq_box, expand=False, fill=False)

        self.cmdline_args = gtk.Label(helptext)

        self.cmdline_sw = ScrolledWindow()
        self.cmdline_sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
        self.cmdline_sw.add_with_viewport(self.cmdline_args)

        # wide enough for the help text plus the vertical scrollbar
        self.cmdline_sw.set_size_request(self.cmdline_args.size_request()[0]+SCROLLBAR_WIDTH, 200)

        self.vbox.pack_start(self.cmdline_sw)

        self.add(self.vbox)

        self.show_all()

        if self.main is not None:
            self.connect('destroy', lambda w: self.main.window_closed('help'))
        else:
            # standalone mode: run a main loop of our own until destroyed
            self.connect('destroy', lambda w: gtk.main_quit())
            gtk.main()

    def close(self, widget=None):
        self.destroy()
|
||||
|
||||
|
||||
class ScrolledWindow(gtk.ScrolledWindow):
    """gtk.ScrolledWindow with vertical-scrolling helpers.

    Assumes a single viewport child, so self.child.child is the real
    content widget whose height bounds the scroll range."""

    def scroll_to_bottom(self):
        child_height = self.child.child.size_request()[1]
        self.scroll_to(0, child_height)

    def scroll_by(self, dx=0, dy=0):
        # dx is currently ignored; only vertical scrolling is implemented
        v = self.get_vadjustment()
        # clamp to the adjustment's upper bound
        new_y = min(v.upper, v.value + dy)
        self.scroll_to(0, new_y)

    def scroll_to(self, x=0, y=0):
        # replaces the vadjustment wholesale rather than setting .value
        # NOTE(review): local `v` is unused here
        v = self.get_vadjustment()
        child_height = self.child.child.size_request()[1]
        new_adj = gtk.Adjustment(y, 0, child_height)
        self.set_vadjustment(new_adj)
|
||||
|
||||
|
||||
class AutoScrollingWindow(ScrolledWindow):
    """ScrolledWindow acting as a drag-and-drop destination that
    auto-scrolls while a drag hovers within 10px of its top or bottom
    edge; scroll speed grows the closer the pointer is to the edge."""

    def __init__(self):
        ScrolledWindow.__init__(self)
        self.drag_dest_set(gtk.DEST_DEFAULT_MOTION |
                           gtk.DEST_DEFAULT_DROP,
                           TARGET_ALL,
                           gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
        self.connect('drag_motion' , self.drag_motion )
#        self.connect('drag_data_received', self.drag_data_received)
        # id of the pending gobject timeout driving the auto-scroll, or None
        self.vscrolltimeout = None

#    def drag_data_received(self, widget, context, x, y, selection, targetType, time):
#        print _("AutoScrollingWindow.drag_data_received("), widget

    def drag_motion(self, widget, context, x, y, time):
        v = self.get_vadjustment()
        # within 10px of the bottom edge: scroll down (positive amount)
        if v.page_size - y <= 10:
            amount = (10 - int(v.page_size - y)) * 2
            self.start_scrolling(amount)
        # within 10px of the top edge: scroll up (negative amount)
        elif y <= 10:
            amount = (y - 10) * 2
            self.start_scrolling(amount)
        else:
            self.stop_scrolling()
        return True

    def scroll_and_wait(self, amount, lock_held):
        """Scroll once and reschedule ourselves every 100ms so scrolling
        continues for as long as the drag keeps hovering near an edge."""
        if not lock_held:
            gtk.threads_enter()
        self.scroll_by(0, amount)
        if not lock_held:
            gtk.threads_leave()
        if self.vscrolltimeout is not None:
            gobject.source_remove(self.vscrolltimeout)
        self.vscrolltimeout = gobject.timeout_add(100, self.scroll_and_wait, amount, False)
        #print "adding timeout", self.vscrolltimeout, amount

    def start_scrolling(self, amount):
        # cancel any previous timeout, then scroll immediately
        if self.vscrolltimeout is not None:
            gobject.source_remove(self.vscrolltimeout)
        self.scroll_and_wait(amount, True)

    def stop_scrolling(self):
        if self.vscrolltimeout is not None:
            #print "removing timeout", self.vscrolltimeout
            gobject.source_remove(self.vscrolltimeout)
            self.vscrolltimeout = None
|
||||
|
||||
class MessageDialog(gtk.MessageDialog):
    """Modal message dialog that dispatches to yesfunc/nofunc callbacks on
    response and then destroys itself."""

    flags = gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT

    def __init__(self, parent, title, message,
                 type=gtk.MESSAGE_ERROR,
                 buttons=gtk.BUTTONS_OK,
                 yesfunc=None, nofunc=None,
                 default=gtk.RESPONSE_OK
                 ):
        gtk.MessageDialog.__init__(self, parent,
                                   self.flags,
                                   type, buttons, message)

        self.set_size_request(-1, -1)
        self.set_resizable(False)
        self.set_title(title)
        if default is not None:
            self.set_default_response(default)

        self.label.set_line_wrap(True)

        self.connect('response', self.callback)

        self.yesfunc = yesfunc
        self.nofunc = nofunc
        if os.name == 'nt':
            # raise the parent first so the dialog ends up on top on Windows
            parent.present()
        self.show_all()

    def callback(self, widget, response_id, *args):
        """OK/Yes -> yesfunc, Cancel/No -> nofunc; always destroys the dialog."""
        if ((response_id == gtk.RESPONSE_OK or
             response_id == gtk.RESPONSE_YES) and
            self.yesfunc is not None):
            self.yesfunc()
        if ((response_id == gtk.RESPONSE_CANCEL or
             response_id == gtk.RESPONSE_NO )
            and self.nofunc is not None):
            self.nofunc()
        self.destroy()
|
||||
|
||||
class ErrorMessageDialog(MessageDialog):
    """MessageDialog whose flags omit gtk.DIALOG_MODAL (non-blocking)."""
    flags = gtk.DIALOG_DESTROY_WITH_PARENT
|
||||
|
||||
|
||||
if gtk.pygtk_version < (2, 4, 1):
|
||||
|
||||
    class FileSelection(gtk.FileSelection):
        """Legacy gtk.FileSelection wrapper used on PyGTK < 2.4.1.

        Reports the chosen path(s) through got_location_func /
        got_multiple_location_func, or no_location_func when the dialog is
        cancelled or destroyed without a choice."""

        def __init__(self, main, title='', fullname='', got_location_func=None, no_location_func=None, got_multiple_location_func=None, show=True):
            gtk.FileSelection.__init__(self)
            from BitTorrent.ConvertedMetainfo import filesystem_encoding
            self.fsenc = filesystem_encoding
            # ensure fullname is decodable; fall back to the filesystem
            # encoding when it is not valid utf8
            try:
                fullname.decode('utf8')
            except:
                fullname = fullname.decode(self.fsenc)
            self.main = main
            self.set_modal(True)
            self.set_destroy_with_parent(True)
            self.set_title(title)
            # multi-select only when a multi callback is given without a
            # single-location callback
            if (got_location_func is None and
                got_multiple_location_func is not None):
                self.set_select_multiple(True)
            self.got_location_func = got_location_func
            self.no_location_func = no_location_func
            self.got_multiple_location_func = got_multiple_location_func
            self.cancel_button.connect("clicked", self.destroy)
            # destroy without OK counts as "no location"
            self.d_handle = self.connect('destroy', self.no_location)
            self.ok_button.connect("clicked", self.done)
            self.set_filename(fullname)
            if show:
                self.show()

        def no_location(self, widget=None):
            if self.no_location_func is not None:
                self.no_location_func()

        def done(self, widget=None):
            """OK pressed: report the selection, then destroy without
            triggering the no_location destroy handler."""
            if self.get_select_multiple():
                self.got_multiple_location()
            else:
                self.got_location()
            self.disconnect(self.d_handle)
            self.destroy()

        def got_location(self):
            if self.got_location_func is not None:
                name = self.get_filename()
                self.got_location_func(name)

        def got_multiple_location(self):
            if self.got_multiple_location_func is not None:
                names = self.get_selections()
                self.got_multiple_location_func(names)

        def destroy(self, widget=None):
            gtk.FileSelection.destroy(self)

        def close_child_windows(self):
            self.no_location()

        def close(self, widget=None):
            self.destroy()
|
||||
|
||||
    # On the legacy gtk.FileSelection there is no per-action dialog mode,
    # so all of these are plain aliases of FileSelection.
    class OpenFileSelection(FileSelection):
        pass

    class SaveFileSelection(FileSelection):
        pass

    class ChooseFolderSelection(FileSelection):
        pass

    class CreateFolderSelection(FileSelection):
        pass

    class FileOrFolderSelection(FileSelection):
        pass
|
||||
|
||||
else:
|
||||
|
||||
    class FileSelection(gtk.FileChooserDialog):
        """gtk.FileChooserDialog wrapper (PyGTK >= 2.4.1).

        Reports the chosen path(s) through got_location_func /
        got_multiple_location_func, or no_location_func on cancel,
        close, or an empty OK."""

        def __init__(self, action, main, title='', fullname='',
                     got_location_func=None, no_location_func=None,
                     got_multiple_location_func=None, show=True):
            gtk.FileChooserDialog.__init__(self, action=action, title=title,
                                           buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                                    gtk.STOCK_OK, gtk.RESPONSE_OK))
            from BitTorrent.ConvertedMetainfo import filesystem_encoding
            self.fsenc = filesystem_encoding
            # ensure fullname is decodable; fall back to the filesystem
            # encoding when it is not valid utf8
            try:
                fullname.decode('utf8')
            except:
                fullname = fullname.decode(self.fsenc)
            self.set_default_response(gtk.RESPONSE_OK)
            if action == gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER:
                # extra button toggling create-folder <-> choose-existing
                self.convert_button_box = gtk.HBox()
                self.convert_button = gtk.Button(_("Choose an existing folder..."))
                self.convert_button.connect('clicked', self.change_action)
                self.convert_button_box.pack_end(self.convert_button,
                                                 expand=False,
                                                 fill=False)
                self.convert_button_box.show_all()
                self.set_extra_widget(self.convert_button_box)
            elif action == gtk.FILE_CHOOSER_ACTION_OPEN:
                # file filters: "All Files", plus "Torrents" as the default
                self.all_filter = gtk.FileFilter()
                self.all_filter.add_pattern('*')
                self.all_filter.set_name(_("All Files"))
                self.add_filter(self.all_filter)
                self.torrent_filter = gtk.FileFilter()
                self.torrent_filter.add_pattern('*.torrent')
                self.torrent_filter.add_mime_type('application/x-bittorrent')
                self.torrent_filter.set_name(_("Torrents"))
                self.add_filter(self.torrent_filter)
                self.set_filter(self.torrent_filter)

            self.main = main
            self.set_modal(True)
            self.set_destroy_with_parent(True)
            # seed the dialog location from fullname, per action type;
            # older gtk versions want filesystem-encoded byte strings
            if fullname:
                if action == gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER:
                    if gtk.gtk_version < (2,6):
                        fullname = fullname.encode(self.fsenc)
                    self.set_filename(fullname)
                elif action == gtk.FILE_CHOOSER_ACTION_OPEN:
                    # treat fullname as a directory: only the folder is set
                    if fullname[-1] != os.sep:
                        fullname = fullname + os.sep
                    path, filename = os.path.split(fullname)
                    if gtk.gtk_version < (2,6):
                        path = path.encode(self.fsenc)
                    self.set_current_folder(path)
                else:
                    # save/create: split into folder and suggested name
                    if fullname[-1] == os.sep:
                        fullname = fullname[:-1]
                    path, filename = os.path.split(fullname)
                    if gtk.gtk_version < (2,8):
                        path = path.encode(self.fsenc)
                    self.set_current_folder(path)
                    self.set_current_name(filename)
            if got_multiple_location_func is not None:
                self.got_multiple_location_func = got_multiple_location_func
                self.set_select_multiple(True)
            self.got_location_func = got_location_func
            self.no_location_func = no_location_func
            self.connect('response', self.got_response)
            # destroying the dialog without a response counts as cancel
            self.d_handle = self.connect('destroy', self.got_response,
                                         gtk.RESPONSE_CANCEL)
            if show:
                self.show()

        def change_action(self, widget):
            """Flip between create-folder and select-folder modes."""
            if self.get_action() == gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER:
                self.convert_button.set_label(_("Create a new folder..."))
                self.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
            elif self.get_action() == gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER:
                self.convert_button.set_label(_("Choose an existing folder..."))
                self.set_action(gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER)

        def got_response(self, widget, response):
            """Dispatch the dialog result to the configured callbacks,
            then destroy the dialog exactly once."""
            if response == gtk.RESPONSE_OK:
                if self.get_select_multiple():
                    if self.got_multiple_location_func is not None:
                        self.got_multiple_location_func(self.get_filenames())
                elif self.got_location_func is not None:
                    fn = self.get_filename()
                    if fn:
                        self.got_location_func(fn)
                    else:
                        # OK with nothing selected is treated as cancel
                        self.no_location_func()
            else:
                if self.no_location_func is not None:
                    self.no_location_func()
            self.disconnect(self.d_handle)
            self.destroy()

        def done(self, widget=None):
            # NOTE(review): calls got_multiple_location()/got_location(),
            # which are not defined on this FileChooserDialog variant --
            # looks like a leftover from the gtk.FileSelection code path;
            # confirm whether this method is ever invoked here.
            if self.get_select_multiple():
                self.got_multiple_location()
            else:
                self.got_location()
            self.disconnect(self.d_handle)
            self.destroy()

        def close_child_windows(self):
            self.destroy()

        def close(self, widget=None):
            self.destroy()
|
||||
|
||||
|
||||
    class OpenFileSelection(FileSelection):
        """FileSelection preset to open an existing file."""

        def __init__(self, *args, **kwargs):
            FileSelection.__init__(self, gtk.FILE_CHOOSER_ACTION_OPEN, *args,
                                   **kwargs)


    class SaveFileSelection(FileSelection):
        """FileSelection preset to save to a file."""

        def __init__(self, *args, **kwargs):
            FileSelection.__init__(self, gtk.FILE_CHOOSER_ACTION_SAVE, *args,
                                   **kwargs)


    class ChooseFolderSelection(FileSelection):
        """FileSelection preset to pick an existing folder."""

        def __init__(self, *args, **kwargs):
            FileSelection.__init__(self, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
                                   *args, **kwargs)

    class CreateFolderSelection(FileSelection):
        """FileSelection preset to create a new folder."""

        def __init__(self, *args, **kwargs):
            FileSelection.__init__(self, gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER,
                                   *args, **kwargs)
|
||||
|
||||
|
||||
    class FileOrFolderSelection(FileSelection):
        """FileSelection that can toggle between picking a file and picking
        a folder via an extra button; title and button label track the mode."""

        def __init__(self, *args, **kwargs):
            FileSelection.__init__(self, gtk.FILE_CHOOSER_ACTION_OPEN, *args,
                                   **kwargs)
            self.select_file = _("Select a file" )
            self.select_folder = _("Select a folder")
            self.convert_button_box = gtk.HBox()
            self.convert_button = gtk.Button(self.select_folder)
            self.convert_button.connect('clicked', self.change_action)
            self.convert_button_box.pack_end(self.convert_button,
                                             expand=False,
                                             fill=False)
            self.convert_button_box.show_all()
            self.set_extra_widget(self.convert_button_box)
            self.reset_by_action()
            # file mode starts with the permissive "All Files" filter
            self.set_filter(self.all_filter)


        def change_action(self, widget):
            """Toggle between open-file and select-folder modes."""
            if self.get_action() == gtk.FILE_CHOOSER_ACTION_OPEN:
                self.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
            elif self.get_action() == gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER:
                self.set_action(gtk.FILE_CHOOSER_ACTION_OPEN)
            self.reset_by_action()

        def reset_by_action(self):
            """Sync button label and window title with the current mode;
            the button always offers the *other* mode."""
            if self.get_action() == gtk.FILE_CHOOSER_ACTION_OPEN:
                self.convert_button.set_label(self.select_folder)
                self.set_title(self.select_file)
            elif self.get_action() == gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER:
                self.convert_button.set_label(self.select_file)
                self.set_title(self.select_folder)

        def set_title(self, title):
            # append a colon to whatever title the caller supplies
            mytitle = title + ':'
            FileSelection.set_title(self, mytitle)
|
||||
|
||||
|
||||
class PaddedHSeparator(gtk.VBox):
    """gtk.HSeparator wrapped in symmetric vertical padding."""

    def __init__(self, spacing=SPACING):
        gtk.VBox.__init__(self)
        self.sep = gtk.HSeparator()
        self.pack_start(self.sep, expand=False, fill=False, padding=spacing)
        self.show_all()
|
||||
|
||||
|
||||
class HSeparatedBox(gtk.VBox):
    """VBox that automatically inserts a separator between packed children.

    Real children therefore sit at even indices of the underlying box; the
    overridden API (get_children, reorder_child, remove) hides the
    separators from callers."""

    def new_separator(self):
        return PaddedHSeparator()

    def _get_children(self):
        # raw child list, separators included
        return gtk.VBox.get_children(self)

    def get_children(self):
        # every odd index is a separator; return only real children
        return self._get_children()[0::2]

    def _reorder_child(self, child, index):
        gtk.VBox.reorder_child(self, child, index)

    def reorder_child(self, child, index):
        """Move `child` to logical position `index`, dragging its adjacent
        separator along so the child/separator alternation is preserved."""
        children = self._get_children()
        oldindex = children.index(child)
        sep = None
        if oldindex == len(children) - 1:
            sep = children[oldindex-1]
        else:
            sep = children[oldindex+1]

        # logical index -> raw index (children occupy even slots)
        newindex = index*2
        if newindex == len(children) -1:
            # moving to the end: separator ends up before the child
            self._reorder_child(sep, newindex-1)
            self._reorder_child(child, newindex)
        else:
            self._reorder_child(child, newindex)
            self._reorder_child(sep, newindex+1)

    def pack_start(self, widget, *args, **kwargs):
        # insert a separator first unless the box is empty
        if len(self._get_children()):
            s = self.new_separator()
            gtk.VBox.pack_start(self, s, *args, **kwargs)
            s.show()
        gtk.VBox.pack_start(self, widget, *args, **kwargs)

    def pack_end(self, widget, *args, **kwargs):
        # insert a separator first unless the box is empty
        if len(self._get_children()):
            s = self.new_separator()
            gtk.VBox.pack_start(self, s, *args, **kwargs)
            s.show()
        gtk.VBox.pack_end(self, widget, *args, **kwargs)

    def remove(self, widget):
        """Remove `widget` and the separator that was paired with it."""
        children = self._get_children()
        if len(children) > 1:
            index = children.index(widget)
            if index == 0:
                sep = children[index+1]
            else:
                sep = children[index-1]
            sep.destroy()
        gtk.VBox.remove(self, widget)
|
82
BitTorrent/GetTorrent.py
Executable file
82
BitTorrent/GetTorrent.py
Executable file
@ -0,0 +1,82 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# GetTorrent -- abstraction which can get a .torrent file from multiple
|
||||
# sources: local file, url, etc.
|
||||
|
||||
# written by Matt Chisholm
|
||||
|
||||
import os
|
||||
import re
|
||||
import zurllib
|
||||
from bencode import bdecode
|
||||
from BitTorrent.platform import get_cache_dir
|
||||
|
||||
# Matches absolute URLs ("http://", "https://", "ftp://", ...).  Raw string
# so the \w regex escape is not subject to Python string-escape processing.
urlpat = re.compile(r'^\w+://')
||||
|
||||
def get_quietly(arg):
    """Like get(), but suppress errors for files inside the browser cache.

    Returns (data, errors) exactly as get() does, except that errors are
    cleared when `arg` lies inside the cache directory."""
    (data, errors) = get(arg)
    # If there's an error opening a file from the IE cache,
    # act like we simply didn't get a file (because we didn't)
    if errors:
        cache = get_cache_dir()
        if (cache is not None) and (cache in arg):
            errors = []
    return data, errors
|
||||
|
||||
def get(arg):
    """Fetch .torrent data from `arg`, a local path or a URL.

    Returns (data, errors): data is the raw bytes or None, errors is a
    list of human-readable error strings."""
    data = None
    errors = []
    # existing local file first, then anything that looks like a URL
    if os.access(arg, os.F_OK):
        data, errors = get_file(arg)
    elif urlpat.match(arg):
        data, errors = get_url(arg)
    else:
        errors.append(_("Could not read %s") % arg)
    return data, errors
|
||||
|
||||
|
||||
def get_url(url):
|
||||
data = None
|
||||
errors = []
|
||||
err_str = _("Could not download or open \n%s\n"
|
||||
"Try using a web browser to download the torrent file.") % url
|
||||
u = None
|
||||
try:
|
||||
u = zurllib.urlopen(url)
|
||||
data = u.read()
|
||||
u.close()
|
||||
b = bdecode(data)
|
||||
except Exception, e:
|
||||
if u is not None:
|
||||
u.close()
|
||||
errors.append(err_str + "\n(%s)" % e)
|
||||
data = None
|
||||
else:
|
||||
if u is not None:
|
||||
u.close()
|
||||
|
||||
return data, errors
|
||||
|
||||
|
||||
def get_file(filename):
|
||||
data = None
|
||||
errors = []
|
||||
f = None
|
||||
try:
|
||||
f = file(filename, 'rb')
|
||||
data = f.read()
|
||||
f.close()
|
||||
except Exception, e:
|
||||
if f is not None:
|
||||
f.close()
|
||||
errors.append((_("Could not read %s") % filename) + (': %s' % str(e)))
|
||||
|
||||
return data, errors
|
188
BitTorrent/HTTPHandler.py
Executable file
188
BitTorrent/HTTPHandler.py
Executable file
@ -0,0 +1,188 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen
|
||||
|
||||
from RawServer_magic import Handler
|
||||
from cStringIO import StringIO
|
||||
from sys import stdout
|
||||
import time
|
||||
from gzip import GzipFile
|
||||
|
||||
# set True to print request headers and compression stats to stdout
DEBUG = False

# NOTE(review): weekdays is not referenced anywhere in this file;
# presumably kept for log-date formatting -- confirm before removing.
weekdays = [_("Mon"), _("Tue"), _("Wed"), _("Thu"), _("Fri"), _("Sat"), _("Sun")]

# 1-indexed (None pad) so months[month] matches time.localtime() numbering
months = [None, _("Jan"), _("Feb"), _("Mar"), _("Apr"), _("May"), _("Jun"),
          _("Jul"), _("Aug"), _("Sep"), _("Oct"), _("Nov"), _("Dec")]
|
||||
|
||||
|
||||
class HTTPConnection(object):
    """Per-connection HTTP state machine for GET/HEAD requests.

    Incoming bytes are fed line by line to self.next_func, which is one of
    read_type / read_header and returns the next parser state (or None to
    abort).  Once the headers are complete, the handler's getfunc produces
    the response, which answer() writes back (gzip-compressed when the
    client accepts it and compression actually helps)."""

    def __init__(self, handler, connection):
        self.handler = handler
        self.connection = connection
        self.buf = ''            # bytes received but not yet consumed
        self.closed = False
        self.done = False        # response fully queued for writing
        self.donereading = False # request fully parsed
        self.next_func = self.read_type

    def get_ip(self):
        return self.connection.ip

    def data_came_in(self, data):
        """Feed raw bytes into the parser; returns False when the
        connection should be shut down (parse error or close)."""
        if self.donereading or self.next_func is None:
            return True
        self.buf += data
        # consume complete lines; leave any partial line buffered
        while True:
            try:
                i = self.buf.index('\n')
            except ValueError:
                return True
            val = self.buf[:i]
            self.buf = self.buf[i+1:]
            self.next_func = self.next_func(val)
            if self.donereading:
                return True
            if self.next_func is None or self.closed:
                return False

    def read_type(self, data):
        """Parse the request line; only GET and HEAD are accepted."""
        self.header = data.strip()
        words = data.split()
        if len(words) == 3:
            # "<METHOD> <path> <HTTP/x.y>"
            self.command, self.path, garbage = words
            self.pre1 = False
        elif len(words) == 2:
            # two tokens: pre-HTTP/1.0 style request (no version field)
            self.command, self.path = words
            self.pre1 = True
            if self.command != 'GET':
                return None
        else:
            return None
        if self.command not in ('HEAD', 'GET'):
            return None
        self.headers = {}
        return self.read_header

    def read_header(self, data):
        """Accumulate one header line; a blank line ends the request and
        triggers the handler's getfunc."""
        data = data.strip()
        if data == '':
            self.donereading = True
            # check for Accept-Encoding: header, pick a
            if self.headers.has_key('accept-encoding'):
                ae = self.headers['accept-encoding']
                if DEBUG:
                    print "Got Accept-Encoding: " + ae + "\n"
            else:
                #identity assumed if no header
                ae = 'identity'
            # this eventually needs to support multple acceptable types
            # q-values and all that fancy HTTP crap
            # for now assume we're only communicating with our own client
            if ae.find('gzip') != -1:
                self.encoding = 'gzip'
            else:
                #default to identity.
                self.encoding = 'identity'
            r = self.handler.getfunc(self, self.path, self.headers)
            if r is not None:
                self.answer(r)
            return None
        # "Name: value" -> headers dict with lowercased name
        try:
            i = data.index(':')
        except ValueError:
            return None
        self.headers[data[:i].strip().lower()] = data[i+1:].strip()
        if DEBUG:
            print data[:i].strip() + ": " + data[i+1:].strip()
        return self.read_header

    def answer(self, (responsecode, responsestring, headers, data)):
        """Write the response: optionally gzip the body, print an
        access-log line to stdout, then queue status line + headers +
        body (body omitted for HEAD) on the connection."""
        if self.closed:
            return
        if self.encoding == 'gzip':
            #transform data using gzip compression
            #this is nasty but i'm unsure of a better way at the moment
            compressed = StringIO()
            gz = GzipFile(fileobj = compressed, mode = 'wb', compresslevel = 9)
            gz.write(data)
            gz.close()
            compressed.seek(0,0)
            cdata = compressed.read()
            compressed.close()
            # only use the compressed form when it is actually smaller
            if len(cdata) >= len(data):
                self.encoding = 'identity'
            else:
                if DEBUG:
                    print _("Compressed: %i Uncompressed: %i\n") % (len(cdata),len(data))
                data = cdata
                headers['Content-Encoding'] = 'gzip'

        # i'm abusing the identd field here, but this should be ok
        if self.encoding == 'identity':
            ident = '-'
        else:
            ident = self.encoding
        username = '-'
        referer = self.headers.get('referer','-')
        useragent = self.headers.get('user-agent','-')
        # common-log-format style access line on stdout
        year, month, day, hour, minute, second, a, b, c = time.localtime(time.time())
        print '%s %s %s [%02d/%3s/%04d:%02d:%02d:%02d] "%s" %i %i "%s" "%s"' % (
            self.connection.ip, ident, username, day, months[month], year, hour,
            minute, second, self.header, responsecode, len(data), referer, useragent)
        # rate-limit stdout flushing to at most once per minflush seconds
        t = time.time()
        if t - self.handler.lastflush > self.handler.minflush:
            self.handler.lastflush = t
            stdout.flush()

        self.done = True
        r = StringIO()
        r.write('HTTP/1.0 ' + str(responsecode) + ' ' +
                responsestring + '\r\n')
        # pre-1.0 requests get no Content-Length header
        if not self.pre1:
            headers['Content-Length'] = len(data)
        for key, value in headers.items():
            r.write(key + ': ' + str(value) + '\r\n')
        r.write('\r\n')
        if self.command != 'HEAD':
            r.write(data)
        self.connection.write(r.getvalue())
        if self.connection.is_flushed():
            self.connection.shutdown(1)
|
||||
|
||||
|
||||
class HTTPHandler(Handler):
    """RawServer handler that wraps every transport connection in an
    HTTPConnection parser/responder.

    getfunc is the application callback used to produce responses;
    minflush is the minimum interval, in seconds, between stdout log
    flushes.
    """

    def __init__(self, getfunc, minflush):
        self.connections = {}
        self.getfunc = getfunc
        self.minflush = minflush
        self.lastflush = time.time()

    def connection_made(self, connection):
        # one parser/responder object per transport connection
        self.connections[connection] = HTTPConnection(self, connection)

    def connection_flushed(self, connection):
        # once the full response has been handed off, half-close the socket
        if self.connections[connection].done:
            connection.shutdown(1)

    def connection_lost(self, connection):
        http_conn = self.connections.pop(connection)
        http_conn.closed = True
        # break reference cycles so the parser object can be collected
        del http_conn.connection
        del http_conn.next_func

    def data_came_in(self, connection, data):
        parser = self.connections[connection]
        # a False return means a parse error on a still-open connection
        if not (parser.data_came_in(data) or parser.closed):
            parser.connection.shutdown(1)
|
54
BitTorrent/LaunchPath.py
Executable file
54
BitTorrent/LaunchPath.py
Executable file
@ -0,0 +1,54 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# LaunchPath -- a cross platform way to "open," "launch," or "start"
|
||||
# files and directories
|
||||
|
||||
# written by Matt Chisholm
|
||||
|
||||
import os
|
||||
|
||||
# True once a working platform launcher has been detected (see the
# platform dispatch at the bottom of this module).
can_launch_files = False
# Candidate POSIX openers, probed in order.
posix_browsers = ('gnome-open','konqueror',) #gmc, gentoo only work on dirs
# Name of the first browser found on the PATH, or '' if none.
default_posix_browser = ''
|
||||
|
||||
def launchpath_nt(path):
    """Open *path* with its associated application (Windows only)."""
    os.startfile(path)
|
||||
|
||||
def launchpath_mac(path):
    """Open *path* via the Mac 'open' command, without waiting for it."""
    # BUG: this is untested
    os.spawnlp(os.P_NOWAIT, 'open', 'open', path)
|
||||
|
||||
def launchpath_posix(path):
    """Open *path* with the browser detected at import time.

    Silently does nothing when no browser was found (i.e. when
    default_posix_browser is still the empty string).
    """
    if not default_posix_browser:
        return
    os.spawnlp(os.P_NOWAIT, default_posix_browser,
               default_posix_browser, path)
|
||||
|
||||
def launchpath(path):
    """Placeholder no-op; rebound to a platform-specific implementation
    by the dispatch code at the bottom of this module."""
    pass
|
||||
|
||||
def launchdir(path):
    """Open *path* with the platform launcher, but only if it is an
    existing directory; otherwise do nothing."""
    if not os.path.isdir(path):
        return
    launchpath(path)
|
||||
|
||||
# Bind launchpath to a platform-specific implementation at import time.
if os.name == 'nt':
    can_launch_files = True
    launchpath = launchpath_nt
elif os.name == 'mac':
    can_launch_files = True
    launchpath = launchpath_mac
elif os.name == 'posix':
    # Probe for a known opener on the PATH; the first hit wins.
    # NOTE(review): backslash-escaping inside single quotes is not valid
    # sh quoting -- harmless for the fixed, quote-free names in
    # posix_browsers, but verify before adding entries with quotes.
    for b in posix_browsers:
        if os.system("which '%s' >/dev/null 2>&1" % b.replace("'","\\'")) == 0:
            can_launch_files = True
            default_posix_browser = b
            launchpath = launchpath_posix
            break
|
||||
|
101
BitTorrent/NatCheck.py
Executable file
101
BitTorrent/NatCheck.py
Executable file
@ -0,0 +1,101 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen
|
||||
|
||||
from cStringIO import StringIO
|
||||
from socket import error as socketerror
|
||||
|
||||
protocol_name = 'BitTorrent protocol'
|
||||
|
||||
# header, reserved, download id, my id, [length, message]
|
||||
|
||||
|
||||
class NatCheck(object):
    """Probes whether a peer at ip:port is reachable from outside.

    Opens an outgoing connection, performs the BitTorrent handshake for
    downloadid, and reports the verdict exactly once through
    resultfunc(ok, downloadid, peerid, ip, port).
    """

    def __init__(self, resultfunc, downloadid, peerid, ip, port, rawserver):
        self.resultfunc = resultfunc
        self.downloadid = downloadid
        self.peerid = peerid
        self.ip = ip
        self.port = port
        self.closed = False
        self.buffer = StringIO()
        # The handshake is consumed as a sequence of (length, parser)
        # steps; start with the single byte giving the protocol-name length.
        self.next_len = 1
        self.next_func = self.read_header_len
        rawserver.async_start_connection((ip, port), self)

    def connection_started(self, s):
        self.connection = s
        # Send our half of the handshake:
        # <pstrlen><pstr><8 reserved zero bytes><infohash>
        self.connection.write(chr(len(protocol_name)) + protocol_name +
            (chr(0) * 8) + self.downloadid)

    def connection_failed(self, addr, exception):
        self.answer(False)

    def answer(self, result):
        """Deliver the verdict and tear the connection down."""
        self.closed = True
        try:
            self.connection.close()
        except AttributeError:
            # the connection may never have been established
            pass
        self.resultfunc(result, self.downloadid, self.peerid, self.ip, self.port)

    # Each read_* parser returns (next_length, next_parser) on success
    # or None to signal a handshake mismatch.

    def read_header_len(self, s):
        if ord(s) != len(protocol_name):
            return None
        return len(protocol_name), self.read_header

    def read_header(self, s):
        if s != protocol_name:
            return None
        return 8, self.read_reserved

    def read_reserved(self, s):
        # reserved flag bytes are ignored
        return 20, self.read_download_id

    def read_download_id(self, s):
        if s != self.downloadid:
            return None
        return 20, self.read_peer_id

    def read_peer_id(self, s):
        if s != self.peerid:
            return None
        # full handshake matched: the peer is reachable
        self.answer(True)
        return None

    def data_came_in(self, connection, s):
        # Accumulate bytes until the current step's next_len is satisfied,
        # then hand the chunk to next_func.
        while True:
            if self.closed:
                return
            i = self.next_len - self.buffer.tell()
            if i > len(s):
                self.buffer.write(s)
                return
            self.buffer.write(s[:i])
            s = s[i:]
            m = self.buffer.getvalue()
            # reset() rewinds the cStringIO so truncate() empties it
            self.buffer.reset()
            self.buffer.truncate()
            x = self.next_func(m)
            if x is None:
                if not self.closed:
                    self.answer(False)
                return
            self.next_len, self.next_func = x

    def connection_lost(self, connection):
        if not self.closed:
            self.closed = True
            self.resultfunc(False, self.downloadid, self.peerid, self.ip, self.port)

    def connection_flushed(self, connection):
        pass
|
249
BitTorrent/NewVersion.py
Executable file
249
BitTorrent/NewVersion.py
Executable file
@ -0,0 +1,249 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# written by Matt Chisholm
|
||||
|
||||
import os
|
||||
import sys
|
||||
import zurllib
|
||||
import pickle
|
||||
import threading
|
||||
from sha import sha
|
||||
|
||||
DEBUG = False
|
||||
|
||||
from BitTorrent import ERROR, WARNING, BTFailure, version, app_name
|
||||
from BitTorrent import GetTorrent
|
||||
from BitTorrent.bencode import bdecode, bencode
|
||||
from BitTorrent.platform import os_version, spawn, get_temp_dir, doc_root, is_frozen_exe, osx
|
||||
from BitTorrent.ConvertedMetainfo import ConvertedMetainfo
|
||||
|
||||
if osx:
|
||||
from Foundation import NSAutoreleasePool
|
||||
|
||||
if is_frozen_exe or DEBUG:
|
||||
# needed for py2exe to include the public key lib
|
||||
from Crypto.PublicKey import DSA
|
||||
|
||||
version_host = 'http://version.bittorrent.com/'
|
||||
download_url = 'http://bittorrent.com/download.html'
|
||||
|
||||
# based on Version() class from ShellTools package by Matt Chisholm,
|
||||
# used with permission
|
||||
class Version(list):
    """A dotted release number held as a list of integer components."""

    def __str__(self):
        return '.'.join([str(part) for part in self])

    def is_beta(self):
        # convention: an odd minor (second) component marks a beta release
        return self[1] % 2 == 1

    def from_str(cls, text):
        """Build a Version from a dotted string such as '4.1.2'."""
        parts = text.split('.')
        return Version(map(int, parts))

    def name(self):
        """Release channel name: 'beta' or 'stable'."""
        if self.is_beta():
            return 'beta'
        return 'stable'

    from_str = classmethod(from_str)
|
||||
|
||||
# Version of the currently-running client, parsed once at import time.
currentversion = Version.from_str(version)

# Module-level default; see Updater for how the latest advertised
# version is actually tracked.
availableversion = None
|
||||
|
||||
class Updater(object):
    """Checks the version server for a newer release, downloads the
    signed installer torrent, verifies its signature, and drives the
    install through UI-supplied callbacks."""

    def __init__(self, threadwrap, newversionfunc, startfunc, installfunc, errorfunc):
        self.threadwrap = threadwrap # for calling back to UI from thread
        self.newversionfunc = newversionfunc # alert to new version UI function
        self.startfunc = startfunc # start torrent UI function
        self.installfunc = installfunc # install torrent UI function
        self.errorfunc = errorfunc # report error UI function
        self.infohash = None
        self.version = currentversion
        self.asked_for_install = False
        # per-platform subdirectory on the version server
        self.version_site = version_host
        if os.name == 'nt':
            self.version_site += 'win32/'
            if os_version != 'XP':
                self.version_site += 'legacy/'
        elif osx:
            self.version_site += 'osx/'

    def debug(self, message):
        # route debug text through the UI error channel (thread-safe)
        if DEBUG:
            self.threadwrap(self.errorfunc, WARNING, message)

    def get_available(self):
        """Fetch and parse the latest advertised version; returns it and
        stores it in self.version.  Raises BTFailure on any problem."""
        url = self.version_site + currentversion.name()
        self.debug('Updater.get_available() hitting url %s' % url)
        try:
            u = zurllib.urlopen(url)
            s = u.read()
            s = s.strip()
        except:
            raise BTFailure(_("Could not get latest version from %s")%url)
        try:
            # server returns exactly 'X.Y.Z' (5 characters)
            assert len(s) == 5
            # NOTE(review): this binds a *local* availableversion; the
            # module-level global of the same name is never updated --
            # confirm that is intended.
            availableversion = Version.from_str(s)
        except:
            raise BTFailure(_("Could not parse new version string from %s")%url)
        self.version = availableversion
        self.debug('Updater.get_available() got %s' % str(self.version))
        return self.version

    def get(self):
        """Check for a newer version and, if one exists and is
        installable here, fetch and verify its installer torrent."""
        try:
            self.get_available()
        except BTFailure, e:
            self.threadwrap(self.errorfunc, WARNING, e)
            return

        if self.version <= currentversion:
            self.debug('Updater.get() not updating old version %s' % str(self.version))
            return

        if not self.can_install():
            self.debug('Updater.get() cannot install on this os')
            return

        self.installer_name = self.calc_installer_name()
        self.installer_url = self.version_site + self.installer_name + '.torrent'
        self.installer_dir = self.calc_installer_dir()

        self.torrentfile = None
        torrentfile, terrors = GetTorrent.get_url(self.installer_url)
        # detached signature lives next to the torrent as <url>.sign
        signature = None
        try:
            signfile = zurllib.urlopen(self.installer_url + '.sign')
        except:
            self.debug('Updater.get() failed to get signfile %s.sign' % self.installer_url)
        else:
            try:
                signature = pickle.load(signfile)
            except:
                self.debug('Updater.get() failed to load signfile %s' % signfile)

        if terrors:
            self.threadwrap(self.errorfunc, WARNING, '\n'.join(terrors))

        if torrentfile and signature:
            # verify the torrent's SHA-1 against the shipped public key
            # before trusting it
            public_key_file = open(os.path.join(doc_root, 'public.key'), 'rb')
            public_key = pickle.load(public_key_file)
            h = sha(torrentfile).digest()
            if public_key.verify(h, signature):
                self.torrentfile = torrentfile
                b = bdecode(torrentfile)
                self.infohash = sha(bencode(b['info'])).digest()
                self.total_size = b['info']['length']
                self.debug('Updater.get() got torrent file and signature')
            else:
                self.debug('Updater.get() torrent file signature failed to verify.')
                pass
        else:
            self.debug('Updater.get() doesn\'t have torrentfile %s and signature %s' %
                       (str(type(torrentfile)), str(type(signature))))

    def installer_path(self):
        # full path to the installer, or None when no directory was found
        if self.installer_dir is not None:
            return os.path.join(self.installer_dir,
                                self.installer_name)
        else:
            return None

    def check(self):
        """Run the version check in a background thread."""
        t = threading.Thread(target=self._check,
                             args=())
        t.start()

    def _check(self):
        if osx:
            # Cocoa calls from a non-main thread need their own pool
            pool = NSAutoreleasePool.alloc().init()
        self.get()
        if self.version > currentversion:
            self.threadwrap(self.newversionfunc, self.version, download_url)

    def can_install(self):
        # auto-install is only supported on Windows and OS X
        if DEBUG:
            return True
        if os.name == 'nt':
            return True
        elif osx:
            return True
        else:
            return False

    def calc_installer_name(self):
        """Build e.g. 'AppName-4.1.2-Beta.exe'; returns None on
        platforms with no installer."""
        if os.name == 'nt':
            ext = 'exe'
        elif osx:
            ext = 'dmg'
        elif os.name == 'posix' and DEBUG:
            ext = 'tar.gz'
        else:
            return

        parts = [app_name, str(self.version)]
        if self.version.is_beta():
            parts.append('Beta')
        name = '-'.join(parts)
        name += '.' + ext
        return name

    def set_installer_dir(self, path):
        self.installer_dir = path

    def calc_installer_dir(self):
        # honor an explicitly-set directory, else fall back to a temp dir
        if hasattr(self, 'installer_dir'):
            return self.installer_dir

        temp_dir = get_temp_dir()
        if temp_dir is not None:
            return temp_dir
        else:
            self.errorfunc(WARNING,
                           _("Could not find a suitable temporary location to "
                             "save the %s %s installer.") % (app_name, self.version))

    def installer_downloaded(self):
        """True when the installer exists on disk and its size matches
        the length recorded in the torrent's info dict."""
        if self.installer_path() and os.access(self.installer_path(), os.F_OK):
            size = os.stat(self.installer_path())[6]
            if size == self.total_size:
                return True
            else:
                #print 'installer is wrong size, is', size, 'should be', self.total_size
                return False
        else:
            #print 'installer does not exist'
            return False

    def download(self):
        """Start torrenting the installer via the UI start callback."""
        if self.torrentfile is not None:
            self.startfunc(self.torrentfile, self.installer_path())
        else:
            self.errorfunc(WARNING, _("No torrent file available for %s %s "
                                      "installer.")%(app_name, self.version))

    def start_install(self):
        # ask the UI to run the installer, at most once
        if not self.asked_for_install:
            if self.installer_downloaded():
                self.asked_for_install = True
                self.installfunc()
            else:
                self.errorfunc(WARNING,
                               _("%s %s installer appears to be corrupt "
                                 "or missing.")%(app_name, self.version))

    def launch_installer(self):
        if os.name == 'nt':
            os.startfile(self.installer_path())
        else:
            self.errorfunc(WARNING, _("Cannot launch installer on this OS"))
|
138
BitTorrent/PiecePicker.py
Executable file
138
BitTorrent/PiecePicker.py
Executable file
@ -0,0 +1,138 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen
|
||||
|
||||
from random import randrange, shuffle, choice
|
||||
|
||||
|
||||
class PiecePicker(object):
    """Chooses which piece to request next using rarest-first with a
    random-order warm-up phase.

    Invariants maintained across all methods:
      * interests[n] holds the pieces we lack that exactly n interested
        peers have; order within each bucket is randomized.
      * pos_in_interests[p] is piece p's index inside its bucket.
      * crosscount[k] counts pieces whose (peer availability + have-bit)
        equals k.
    These parallel structures must be updated together, in order.
    """

    def __init__(self, numpieces, config):
        self.config = config
        self.numpieces = numpieces
        # all pieces start in the "0 peers have it" bucket
        self.interests = [range(numpieces)]
        self.pos_in_interests = range(numpieces)
        self.numinterests = [0] * numpieces
        self.have = [False] * numpieces
        self.crosscount = [numpieces]
        # pieces we have outstanding requests for
        self.started = []
        # subset of started that were requested from seeds
        self.seedstarted = []
        self.numgot = 0
        # random order used during the warm-up phase
        self.scrambled = range(numpieces)
        shuffle(self.scrambled)

    def got_have(self, piece):
        """A peer announced it has *piece*; bump its availability."""
        numint = self.numinterests[piece]
        self.crosscount[numint + self.have[piece]] -= 1
        self.numinterests[piece] += 1
        try:
            self.crosscount[numint + 1 + self.have[piece]] += 1
        except IndexError:
            self.crosscount.append(1)
        if self.have[piece]:
            # pieces we already have are not tracked in interests
            return
        if numint == len(self.interests) - 1:
            self.interests.append([])
        self._shift_over(piece, self.interests[numint], self.interests[numint + 1])

    def lost_have(self, piece):
        """A peer with *piece* disconnected; drop its availability."""
        numint = self.numinterests[piece]
        self.crosscount[numint + self.have[piece]] -= 1
        self.numinterests[piece] -= 1
        self.crosscount[numint - 1 + self.have[piece]] += 1
        if self.have[piece]:
            return
        self._shift_over(piece, self.interests[numint], self.interests[numint - 1])

    def _shift_over(self, piece, l1, l2):
        """Move *piece* from bucket l1 to a random position in l2,
        keeping pos_in_interests consistent (swap-remove from l1)."""
        p = self.pos_in_interests[piece]
        l1[p] = l1[-1]
        self.pos_in_interests[l1[-1]] = p
        del l1[-1]
        # insert at a random slot to keep bucket order unbiased
        newp = randrange(len(l2) + 1)
        if newp == len(l2):
            self.pos_in_interests[piece] = len(l2)
            l2.append(piece)
        else:
            old = l2[newp]
            self.pos_in_interests[old] = len(l2)
            l2.append(old)
            l2[newp] = piece
            self.pos_in_interests[piece] = newp

    def requested(self, piece, seed = False):
        """Record that a request for *piece* is now outstanding."""
        if piece not in self.started:
            self.started.append(piece)
        if seed and piece not in self.seedstarted:
            self.seedstarted.append(piece)

    def complete(self, piece):
        """Mark *piece* as fully downloaded and retire its bookkeeping."""
        assert not self.have[piece]
        self.have[piece] = True
        self.crosscount[self.numinterests[piece]] -= 1
        try:
            self.crosscount[self.numinterests[piece] + 1] += 1
        except IndexError:
            self.crosscount.append(1)
        self.numgot += 1
        # swap-remove from its interests bucket
        l = self.interests[self.numinterests[piece]]
        p = self.pos_in_interests[piece]
        l[p] = l[-1]
        self.pos_in_interests[l[-1]] = p
        del l[-1]
        # seedstarted is a subset of started, so if the first remove
        # raises the second would too
        try:
            self.started.remove(piece)
            self.seedstarted.remove(piece)
        except ValueError:
            pass

    def next(self, havefunc, seed = False):
        """Pick the next piece to request from a peer.

        havefunc(i) says whether the peer has piece i.  Prefers the
        rarest already-started piece (to finish partials first); during
        warm-up (fewer than rarest_first_cutoff pieces downloaded) falls
        back to random order, afterwards to strict rarest-first.
        Returns a piece index or None.
        """
        bests = None
        bestnum = 2 ** 30
        if seed:
            s = self.seedstarted
        else:
            s = self.started
        for i in s:
            if havefunc(i):
                if self.numinterests[i] < bestnum:
                    bests = [i]
                    bestnum = self.numinterests[i]
                elif self.numinterests[i] == bestnum:
                    bests.append(i)
        if bests:
            return choice(bests)
        if self.numgot < self.config['rarest_first_cutoff']:
            for i in self.scrambled:
                if havefunc(i):
                    return i
            return None
        # buckets are indexed by availability: scan rarest first, but
        # never rarer than the best started piece found above
        for i in xrange(1, min(bestnum, len(self.interests))):
            for j in self.interests[i]:
                if havefunc(j):
                    return j
        return None

    def am_I_complete(self):
        return self.numgot == self.numpieces

    def bump(self, piece):
        """Demote *piece* to the back of its bucket (deprioritize it)
        and forget any outstanding request for it."""
        l = self.interests[self.numinterests[piece]]
        pos = self.pos_in_interests[piece]
        del l[pos]
        l.append(piece)
        # reindex everything that shifted left
        for i in range(pos,len(l)):
            self.pos_in_interests[l[i]] = i
        try:
            self.started.remove(piece)
            self.seedstarted.remove(piece)
        except ValueError:
            pass
|
183
BitTorrent/RateLimiter.py
Executable file
183
BitTorrent/RateLimiter.py
Executable file
@ -0,0 +1,183 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Uoti Urpala and Andrew Loewenstern
|
||||
|
||||
from BitTorrent.platform import bttime
|
||||
|
||||
def minctx(a, b):
    """Return whichever of the two rate-limit contexts has the smaller
    pending delay (offset_amount / rate; zero when rate is 0).
    Ties go to *a*."""
    delay_a = 0
    delay_b = 0
    if a.rate > 0:
        delay_a = a.offset_amount / a.rate
    if b.rate > 0:
        delay_b = b.offset_amount / b.rate
    if delay_a > delay_b:
        return b
    return a
|
||||
|
||||
class Dummy(object):
    """Inert stand-in spliced into the upload ring in place of a closed
    connection, preserving the ring's linkage."""

    # a Dummy never reports itself as closed
    closed = False

    def __init__(self, next):
        self.next_upload = next

    def send_partial(self, size):
        # a placeholder never sends any bytes
        return 0
|
||||
|
||||
class RateLimitedGroup(object):
    """Shared rate-limit bucket for a group of connections.

    rate is given in KiB/s and tracked internally in bytes/s.
    got_exception is called with any exception raised while sending on
    behalf of this group.
    """

    def __init__(self, rate, got_exception):
        self.got_exception = got_exception
        # limiting state
        self.lasttime = bttime()
        self.check_time = 0
        self.offset_amount = 0
        self.set_rate(rate)
        # accounting state
        self.count = 0
        self.counts = []

    def set_rate(self, new_rate):
        # convert KiB/s -> bytes/s and restart the accounting window
        self.rate = new_rate * 1024
        self.offset_amount = 0
        self.check_time = 0
|
||||
|
||||
class MultiRateLimiter(object):
    """Round-robin upload scheduler enforcing both a global rate cap and
    per-group (RateLimitedGroup) caps.

    Connections waiting to send form a circular singly-linked list via
    their next_upload attribute; self.last points at the tail, so
    self.last.next_upload is the head.  offset_amount is a token-bucket
    style debt: positive means we are over budget and must wait.
    """

    def __init__(self, sched):
        self.sched = sched
        self.last = None
        self.upload_rate = 0
        self.unitsize = 17000
        self.offset_amount = 0
        self.ctxs = [] # list of contexts with connections in the queue
        self.ctx_counts = {} # dict conn -> how many connections each context has

    def set_parameters(self, rate, unitsize):
        if unitsize > 17000:
            # Since data is sent to peers in a round-robin fashion, max one
            # full request at a time, setting this higher would send more data
            # to peers that use request sizes larger than standard 16 KiB.
            # 17000 instead of 16384 to allow room for metadata messages.
            unitsize = 17000
        self.upload_rate = rate * 1024
        self.unitsize = unitsize
        self.lasttime = bttime()
        self.offset_amount = 0

    def queue(self, conn, ctx):
        """Add *conn* (belonging to rate group *ctx*) to the send ring."""
        assert conn.next_upload is None
        if ctx not in self.ctxs:
            ctx.check_time = 1
            self.ctxs.append(ctx)
            self.ctx_counts[ctx] = 1
        else:
            self.ctx_counts[ctx] += 1

        if self.last is None:
            # ring was empty: conn links to itself and we kick off sending
            self.last = conn
            conn.next_upload = conn
            self.try_send(True)
        else:
            # splice conn in as the new tail
            conn.next_upload = self.last.next_upload
            self.last.next_upload = conn
            self.last = conn

    def increase_offset(self, bytes):
        # charge bytes sent outside try_send against the global budget
        self.offset_amount += bytes

    def try_send(self, check_time = False):
        """Send unitsize chunks round-robin while both the global and the
        current group's budgets allow; otherwise reschedule itself."""
        t = bttime()
        cur = self.last.next_upload

        # credit the global bucket for elapsed time, capped at one
        # second's worth of burst
        self.offset_amount -= (t - self.lasttime) * self.upload_rate
        self.offset_amount = max(self.offset_amount, -1 * self.upload_rate)
        self.lasttime = t

        # credit each group's bucket likewise
        for ctx in self.ctxs:
            if ctx.rate == 0:
                ctx.offset_amount = 0
            if ctx.lasttime != t:
                ctx.offset_amount -=(t - ctx.lasttime) * ctx.rate
                ctx.lasttime = t
                if ctx.check_time:
                    ctx.offset_amount = max(ctx.offset_amount, 0)

        # the group closest to being allowed to send
        min_offset = reduce(minctx, self.ctxs)
        ctx = cur.encoder.context.rlgroup
        while (self.offset_amount <= 0 and min_offset.offset_amount <= 0) or self.upload_rate == 0:
            if ctx.offset_amount <= 0:
                try:
                    bytes = cur.send_partial(self.unitsize)
                except KeyboardInterrupt:
                    raise
                except Exception, e:
                    # report through the group's handler and drop the send
                    cur.encoder.context.rlgroup.got_exception(e)
                    cur = self.last.next_upload
                    bytes = 0

                if self.upload_rate > 0:
                    self.offset_amount += bytes
                if ctx.rate > 0:
                    ctx.offset_amount += bytes
                ctx.count += bytes

                if bytes == 0 or not cur.connection.is_flushed():
                    # this connection is done (or blocked): unlink it
                    if self.last is cur:
                        # it was the only one; the ring is now empty
                        self.last = None
                        cur.next_upload = None
                        self.ctx_counts = {}
                        self.ctxs = []
                        break
                    else:
                        self.last.next_upload = cur.next_upload
                        cur.next_upload = None
                        old = ctx
                        cur = self.last.next_upload
                        ctx = cur.encoder.context.rlgroup
                        self.ctx_counts[old] -= 1
                        if self.ctx_counts[old] == 0:
                            del(self.ctx_counts[old])
                            self.ctxs.remove(old)
                            if min_offset == old:
                                min_offset = reduce(minctx, self.ctxs)
                else:
                    # sent successfully: advance round-robin
                    self.last = cur
                    cur = cur.next_upload
                    ctx = cur.encoder.context.rlgroup
                    min_offset = reduce(minctx, self.ctxs)
            else:
                # this group is over budget: skip to the next connection
                self.last = cur
                cur = self.last.next_upload
                ctx = cur.encoder.context.rlgroup
        else:
            # while exited without break: budgets exhausted, reschedule
            # after the longest of the global and group delays
            myDelay = minCtxDelay = 0
            if self.upload_rate > 0:
                myDelay = self.offset_amount / self.upload_rate
            if min_offset.rate > 0:
                minCtxDelay = min_offset.offset_amount / min_offset.rate
            delay = max(myDelay, minCtxDelay)
            self.sched(self.try_send, delay)

    def clean_closed(self):
        """Replace closed connections in the ring with inert Dummy nodes
        so ring traversal stays safe."""
        if self.last is None:
            return
        orig = self.last
        if self.last.closed:
            self.last = Dummy(self.last.next_upload)
            self.last.encoder = orig.encoder
        c = self.last
        while True:
            if c.next_upload is orig:
                # walked the full ring; close it back onto (possibly new) last
                c.next_upload = self.last
                break
            if c.next_upload.closed:
                o = c.next_upload
                c.next_upload = Dummy(c.next_upload.next_upload)
                c.next_upload.encoder = o.encoder
            c = c.next_upload
|
||||
|
63
BitTorrent/RateMeasure.py
Executable file
63
BitTorrent/RateMeasure.py
Executable file
@ -0,0 +1,63 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen
|
||||
|
||||
from BitTorrent.platform import bttime
|
||||
|
||||
|
||||
class RateMeasure(object):
    """Tracks transfer rate over a sliding window and estimates the
    remaining time for *left* bytes still to come."""

    def __init__(self, left):
        self.start = None
        self.last = None
        self.rate = 0
        self.remaining = None
        # bytes still expected
        self.left = left
        # True once we have observed a stall (window > 20s); see update()
        self.broke = False
        self.got_anything = False

    def data_came_in(self, amount):
        if not self.got_anything:
            self.got_anything = True
            # back-date the start so the first rate sample is finite
            self.start = bttime() - 2
            self.last = self.start
            self.left -= amount
            return
        self.update(bttime(), amount)

    def data_rejected(self, amount):
        # data failed its check: those bytes are owed again
        self.left += amount

    def get_time_left(self):
        """Estimated seconds remaining, or None if unknown."""
        if not self.got_anything:
            return None
        t = bttime()
        # decay the estimate if nothing has arrived for a while
        if t - self.last > 15:
            self.update(t, 0)
        return self.remaining

    def get_size_left(self):
        return self.left

    def update(self, t, amount):
        self.left -= amount
        try:
            # time-weighted average rate over the current window
            self.rate = ((self.rate * (self.last - self.start)) + amount) / (t - self.start)
            self.last = t
            self.remaining = self.left / self.rate
            # shrink the window so it never exceeds the projected remainder
            if self.start < self.last - self.remaining:
                self.start = self.last - self.remaining
        except ZeroDivisionError:
            # rate is zero: remaining time is unknown
            self.remaining = None
        if self.broke and self.last - self.start < 20:
            self.start = self.last - 20
        if self.last - self.start > 20:
            self.broke = True
|
472
BitTorrent/RawServer.py
Executable file
472
BitTorrent/RawServer.py
Executable file
@ -0,0 +1,472 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen, Uoti Urpala
|
||||
|
||||
import os
|
||||
import sys
|
||||
import socket
|
||||
import signal
|
||||
import struct
|
||||
import thread
|
||||
from bisect import insort
|
||||
from cStringIO import StringIO
|
||||
from traceback import print_exc
|
||||
from errno import EWOULDBLOCK, ENOBUFS
|
||||
|
||||
from BitTorrent.platform import bttime
|
||||
from BitTorrent import WARNING, CRITICAL, FAQ_URL
|
||||
from BitTorrent.defer import Deferred
|
||||
|
||||
try:
|
||||
from select import poll, error, POLLIN, POLLOUT, POLLERR, POLLHUP
|
||||
timemult = 1000
|
||||
except ImportError:
|
||||
from BitTorrent.selectpoll import poll, error, POLLIN, POLLOUT, POLLERR, POLLHUP
|
||||
timemult = 1
|
||||
|
||||
NOLINGER = struct.pack('ii', 1, 0)
|
||||
|
||||
|
||||
class Handler(object):
    """Callback interface driven by RawServer; subclasses override the
    events they care about.  Every default implementation does nothing."""

    # there is only a semantic difference between "made" and "started".
    # "started" is preferred; it forwards to the legacy "made" callback
    # so old subclasses keep working.
    def connection_started(self, s):
        self.connection_made(s)

    def connection_made(self, s):
        """Override to handle a new connection; default is a no-op."""

    def connection_lost(self, s):
        """Override to handle a dropped connection; default is a no-op."""

    # Maybe connection_lost should just have a default 'None' exception
    # parameter instead of this separate callback.
    def connection_failed(self, addr, exception):
        """Override to handle a failed outgoing connect; default no-op."""

    def connection_flushed(self, s):
        """Override to be notified when a send buffer drains; default no-op."""

    def data_came_in(self, addr, datagram):
        """Override to consume incoming data; default is a no-op."""
|
||||
|
||||
|
||||
class SingleSocket(object):
    """Wraps one socket managed by RawServer: buffers outgoing writes,
    drives nonblocking sends, and tracks poll registration."""

    def __init__(self, raw_server, sock, handler, context, ip=None):
        self.raw_server = raw_server
        self.socket = sock
        self.handler = handler
        # list of pending outgoing byte strings, in send order
        self.buffer = []
        self.last_hit = bttime()
        self.fileno = sock.fileno()
        self.connected = False
        self.context = context
        self.port = None

        if ip is not None:
            self.ip = ip
        else:
            try:
                peername = self.socket.getpeername()
            except socket.error:
                # not connected yet (or already gone)
                self.ip = 'unknown'
            else:
                try:
                    self.ip, self.port = peername
                except:
                    assert isinstance(peername, basestring)
                    self.ip = peername # UNIX socket, not really ip

    def close(self):
        """Tear down the socket and deregister it from the server."""
        sock = self.socket
        self.socket = None
        self.buffer = []
        del self.raw_server.single_sockets[self.fileno]
        self.raw_server.poll.unregister(sock)
        # drop the handler reference to break the cycle
        self.handler = None
        if self.raw_server.config['close_with_rst']:
            # zero-linger close makes the peer see an RST instead of FIN
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, NOLINGER)
        sock.close()

    def shutdown(self, val):
        self.socket.shutdown(val)

    def is_flushed(self):
        # True when nothing remains buffered for sending
        return len(self.buffer) == 0

    def write(self, s):
        assert self.socket is not None
        self.buffer.append(s)
        # if the buffer was empty, attempt an immediate send
        if len(self.buffer) == 1:
            self.try_write()

    def try_write(self):
        """Push as much buffered data as the kernel will take, then
        (re)register for POLLOUT only if data remains."""
        if self.connected:
            try:
                while self.buffer != []:
                    amount = self.socket.send(self.buffer[0])
                    if amount != len(self.buffer[0]):
                        # partial send: keep the unsent tail and stop
                        if amount != 0:
                            self.buffer[0] = self.buffer[0][amount:]
                        break
                    del self.buffer[0]
            except socket.error, e:
                code, msg = e
                if code != EWOULDBLOCK:
                    # fatal send error: let the server reap this socket
                    self.raw_server.dead_from_write.append(self)
                    return
        if self.buffer == []:
            self.raw_server.poll.register(self.socket, POLLIN)
        else:
            self.raw_server.poll.register(self.socket, POLLIN | POLLOUT)
|
||||
|
||||
|
||||
def default_error_handler(level, message):
    """Fallback RawServer error callback: ignore the level and dump the
    message to stdout."""
    print(message)
|
||||
|
||||
class RawServer(object):
|
||||
|
||||
def __init__(self, doneflag, config, noisy=True,
             errorfunc=default_error_handler, tos=0):
    """Classic select/poll event loop.

    doneflag  -- threading.Event; setting it stops the loop
    config    -- dict; reads 'timeout_check_interval', 'socket_timeout',
                 'bind', 'close_with_rst'
    errorfunc -- callable(level, message) used for error reporting
    tos       -- IP type-of-service value applied to outgoing sockets
    """
    self.config = config
    self.tos = tos
    self.poll = poll()
    # {fileno: SingleSocket}
    self.single_sockets = {}
    self.udp_sockets = {}
    self.dead_from_write = []
    self.doneflag = doneflag
    self.noisy = noisy
    self.errorfunc = errorfunc
    self.funcs = []
    self.externally_added_tasks = []
    self.listening_handlers = {}
    self.serversockets = {}
    self.live_contexts = {None : True}
    self.ident = thread.get_ident()
    self.to_start = []
    self.add_task(self.scan_for_timeouts, config['timeout_check_interval'])
    if sys.platform.startswith('win'):
        # Windows doesn't support pipes with select(). Just prevent sleeps
        # longer than a second instead of proper wakeup for now.
        self.wakeupfds = (None, None)
        self._wakeup()
    else:
        self.wakeupfds = os.pipe()
        self.poll.register(self.wakeupfds[0], POLLIN)
|
||||
|
||||
def _wakeup(self):
|
||||
self.add_task(self._wakeup, 1)
|
||||
|
||||
def add_context(self, context):
    # Register a context so tasks scheduled under it are allowed to run.
    self.live_contexts[context] = True
|
||||
|
||||
def remove_context(self, context):
    # Drop the context and cancel every task still scheduled under it.
    del self.live_contexts[context]
    self.funcs = [entry for entry in self.funcs if entry[3] != context]
|
||||
|
||||
def add_task(self, func, delay, args=(), context=None):
    """Schedule func(*args) to run `delay` seconds from now.

    Must be called from the RawServer thread.  Tasks whose context has
    already been removed are silently dropped.
    """
    assert thread.get_ident() == self.ident
    # isinstance instead of type(...)== comparisons: same intent, idiomatic
    assert isinstance(args, (list, tuple))
    if context in self.live_contexts:
        # funcs is kept sorted by due time
        insort(self.funcs, (bttime() + delay, func, args, context))
|
||||
|
||||
def external_add_task(self, func, delay, args=(), context=None):
    """Thread-safe variant of add_task: queue the request and wake the
    RawServer thread if it is sleeping in poll()."""
    # isinstance instead of type(...)== comparisons: same intent, idiomatic
    assert isinstance(args, (list, tuple))
    self.externally_added_tasks.append((func, delay, args, context))
    # Wake up the RawServer thread in case it's sleeping in poll()
    if self.wakeupfds[1] is not None:
        os.write(self.wakeupfds[1], 'X')
|
||||
|
||||
def scan_for_timeouts(self):
    """Periodic task: close TCP connections idle longer than
    config['socket_timeout']; reschedules itself each run."""
    self.add_task(self.scan_for_timeouts,
                  self.config['timeout_check_interval'])
    cutoff = bttime() - self.config['socket_timeout']
    # `s not in self.udp_sockets` replaces the original membership test
    # against udp_sockets.keys() -- same result, O(1) dict lookup instead
    # of an O(n) list scan.  UDP "connections" never time out.
    tokill = [s for s in self.single_sockets.values()
              if s not in self.udp_sockets and s.last_hit < cutoff]
    # collect first, then close: _close_socket mutates single_sockets
    for s in tokill:
        if s.socket is not None:
            self._close_socket(s)
|
||||
|
||||
def create_unixserversocket(filename):
    """Create a non-blocking UNIX-domain listening socket bound at *filename*."""
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.setblocking(0)
    sock.bind(filename)
    sock.listen(5)
    return sock
create_unixserversocket = staticmethod(create_unixserversocket)
|
||||
|
||||
def create_serversocket(port, bind='', reuse=False, tos=0):
    """Create a non-blocking TCP listening socket on (bind, port)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_REUSEADDR has different (unsafe) semantics on Windows, so skip it there
    if reuse and os.name != 'nt':
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.setblocking(0)
    if tos != 0:
        # best-effort IP type-of-service; not supported on every stack
        try:
            sock.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, tos)
        except:
            pass
    sock.bind((bind, port))
    sock.listen(5)
    return sock
create_serversocket = staticmethod(create_serversocket)
|
||||
|
||||
def create_udpsocket(port, bind='', reuse=False, tos=0):
    """Create a non-blocking UDP socket bound to (bind, port)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # SO_REUSEADDR has different (unsafe) semantics on Windows, so skip it there
    if reuse and os.name != 'nt':
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.setblocking(0)
    if tos != 0:
        # best-effort IP type-of-service; not supported on every stack
        try:
            sock.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, tos)
        except:
            pass
    sock.bind((bind, port))
    return sock
create_udpsocket = staticmethod(create_udpsocket)
|
||||
|
||||
def start_listening(self, serversocket, handler, context=None):
    """Begin accepting TCP connections on serversocket; new connections
    are announced to *handler* under *context*."""
    fd = serversocket.fileno()
    self.listening_handlers[fd] = (handler, context)
    self.serversockets[fd] = serversocket
    self.poll.register(serversocket, POLLIN)
|
||||
|
||||
def start_listening_udp(self, serversocket, handler, context=None):
    """Begin receiving UDP datagrams on serversocket via *handler*.

    The socket gets a SingleSocket wrapper and is marked in udp_sockets so
    the event loop delivers (addr, data) instead of (socket, data)."""
    fd = serversocket.fileno()
    self.listening_handlers[fd] = (handler, context)
    wrapper = SingleSocket(self, serversocket, handler, context)
    self.single_sockets[fd] = wrapper
    self.udp_sockets[wrapper] = 1
    self.poll.register(serversocket, POLLIN)
|
||||
|
||||
def stop_listening(self, serversocket):
    """Stop accepting TCP connections on serversocket (does not close it)."""
    fd = serversocket.fileno()
    del self.listening_handlers[fd]
    del self.serversockets[fd]
    self.poll.unregister(serversocket)
|
||||
|
||||
def stop_listening_udp(self, serversocket):
    """Stop receiving UDP datagrams on serversocket and drop its wrapper."""
    fd = serversocket.fileno()
    del self.listening_handlers[fd]
    del self.single_sockets[fd]
    # find the SingleSocket wrapper that wraps this server socket
    matches = [w for w in self.udp_sockets.keys() if w.socket == serversocket]
    del self.udp_sockets[matches[0]]
    self.poll.unregister(serversocket)
|
||||
|
||||
def start_connection(self, dns, handler=None, context=None, do_bind=True):
    """Open a non-blocking outgoing TCP connection to dns=(host, port).

    Returns the SingleSocket wrapper; raises socket.error on failure.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setblocking(0)
    # empty 'bind' config (or do_bind=False) means no explicit local bind
    bindaddr = do_bind and self.config['bind']
    if bindaddr:
        sock.bind((bindaddr, 0))
    if self.tos != 0:
        # best-effort IP type-of-service; not supported on every stack
        try:
            sock.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, self.tos)
        except:
            pass
    try:
        sock.connect_ex(dns)
    except socket.error:
        sock.close()
        raise
    except Exception as e:
        # normalize anything else (e.g. DNS failures) to socket.error
        sock.close()
        raise socket.error(str(e))
    self.poll.register(sock, POLLIN)
    wrapper = SingleSocket(self, sock, handler, context, dns[0])
    self.single_sockets[sock.fileno()] = wrapper
    return wrapper
|
||||
|
||||
def async_start_connection(self, dns, handler=None, context=None, do_bind=True):
    # Queue the request and service it immediately; results are delivered
    # through handler.connection_started / connection_failed callbacks.
    self.to_start.insert(0, (dns, handler, context, do_bind))
    self._start_connection()
|
||||
|
||||
def _start_connection(self):
    # Pop one queued request and attempt it, reporting the outcome via the
    # handler's callbacks instead of raising.
    dns, handler, context, do_bind = self.to_start.pop()
    try:
        conn = self.start_connection(dns, handler, context, do_bind)
    except Exception as e:
        handler.connection_failed(dns, e)
    else:
        handler.connection_started(conn)
|
||||
|
||||
def wrap_socket(self, sock, handler, context=None, ip=None):
    """Adopt an externally created socket into this event loop."""
    sock.setblocking(0)
    self.poll.register(sock, POLLIN)
    wrapper = SingleSocket(self, sock, handler, context, ip)
    self.single_sockets[sock.fileno()] = wrapper
    return wrapper
|
||||
|
||||
# must be called from the main thread
def install_sigint_handler(self):
    # Route Ctrl-C through _handler so shutdown is orderly.
    signal.signal(signal.SIGINT, self._handler)
|
||||
|
||||
def _handler(self, signum, frame):
|
||||
self.external_add_task(self.doneflag.set, 0)
|
||||
# Allow pressing ctrl-c multiple times to raise KeyboardInterrupt,
|
||||
# in case the program is in an infinite loop
|
||||
signal.signal(signal.SIGINT, signal.default_int_handler)
|
||||
|
||||
def _handle_events(self, events):
    """Dispatch one batch of poll() results.

    Server-socket events accept new connections; per-connection events
    read incoming data (TCP or UDP) and flush pending writes; events on
    the wakeup pipe are drained and ignored.
    """
    for fd, event in events:
        if fd in self.serversockets:
            server = self.serversockets[fd]
            if event & (POLLHUP | POLLERR) != 0:
                # listening socket itself died -- fatal for this listener
                self.poll.unregister(server)
                server.close()
                self.errorfunc(CRITICAL, _("lost server socket"))
            else:
                handler, context = self.listening_handlers[fd]
                try:
                    newsock, addr = server.accept()
                except socket.error as e:
                    continue
                try:
                    newsock.setblocking(0)
                    nss = SingleSocket(self, newsock, handler, context)
                    self.single_sockets[newsock.fileno()] = nss
                    self.poll.register(newsock, POLLIN)
                    self._make_wrapped_call(handler.connection_made,
                                            (nss,), context=context)
                except socket.error as e:
                    self.errorfunc(WARNING,
                                   _("Error handling accepted connection: ") +
                                   str(e))
        else:
            s = self.single_sockets.get(fd)
            if s is None:
                if fd == self.wakeupfds[0]:
                    # Another thread wrote this just to wake us up.
                    os.read(fd, 1)
                continue
            s.connected = True
            if event & POLLERR:
                self._close_socket(s)
                continue
            if event & (POLLIN | POLLHUP):
                s.last_hit = bttime()
                try:
                    data, addr = s.socket.recvfrom(100000)
                except socket.error as e:
                    code, msg = e
                    if code != EWOULDBLOCK:
                        self._close_socket(s)
                    continue
                if data == '' and s not in self.udp_sockets:
                    # zero-byte read on TCP means the peer closed
                    self._close_socket(s)
                else:
                    if s not in self.udp_sockets:
                        self._make_wrapped_call(s.handler.data_came_in,
                                                (s, data), s)
                    else:
                        # UDP handlers receive the datagram's source address
                        self._make_wrapped_call(s.handler.data_came_in,
                                                (addr, data), s)

            # data_came_in could have closed the socket (s.socket = None)
            if event & POLLOUT and s.socket is not None:
                s.try_write()
                if s.is_flushed():
                    self._make_wrapped_call(s.handler.connection_flushed,
                                            (s,), s)
|
||||
|
||||
def _pop_externally_added(self):
|
||||
while self.externally_added_tasks:
|
||||
task = self.externally_added_tasks.pop(0)
|
||||
self.add_task(*task)
|
||||
|
||||
def listen_forever(self):
    """Run the event loop until doneflag is set or listen_once reports a
    fatal error (nonzero return)."""
    self.ident = thread.get_ident()
    status = 0
    while not self.doneflag.isSet() and not status:
        status = self.listen_once()
|
||||
|
||||
def listen_once(self, period=1e9):
    """Run one iteration of the event loop: poll, fire due tasks, handle
    socket events.  Returns 0 normally, -1 on a fatal error."""
    try:
        self._pop_externally_added()
        if self.funcs:
            # sleep only until the next scheduled task is due
            period = self.funcs[0][0] - bttime()
        if period < 0:
            period = 0
        events = self.poll.poll(period * timemult)
        if self.doneflag.isSet():
            return 0
        # fire every task whose due time has passed
        while self.funcs and self.funcs[0][0] <= bttime():
            garbage, func, args, context = self.funcs.pop(0)
            self._make_wrapped_call(func, args, context=context)
        self._close_dead()
        self._handle_events(events)
        if self.doneflag.isSet():
            return 0
        self._close_dead()
    except error as e:
        if self.doneflag.isSet():
            return 0
        # I can't find a coherent explanation for what the behavior
        # should be here, and people report conflicting behavior,
        # so I'll just try all the possibilities
        try:
            code, msg, desc = e
        except:
            try:
                code, msg = e
            except:
                code = e
        if code == ENOBUFS:
            # log the traceback so we can see where the exception is coming from
            print_exc(file=sys.stderr)
            self.errorfunc(CRITICAL,
                           _("Have to exit due to the TCP stack flaking "
                             "out. Please see the FAQ at %s") % FAQ_URL)
            return -1
    except KeyboardInterrupt:
        print_exc()
        return -1
    except:
        # anything else: report the traceback but keep the loop alive
        data = StringIO()
        print_exc(file=data)
        self.errorfunc(CRITICAL, data.getvalue())
    return 0
|
||||
|
||||
def _make_wrapped_call(self, function, args, socket=None, context=None):
    """Call function(*args), routing any exception either to the owning
    context's got_exception or, with no context, to errorfunc."""
    try:
        function(*args)
    except KeyboardInterrupt:
        raise
    except Exception as e:   # hopefully nothing raises strings
        # Incoming sockets can be assigned to a particular torrent during
        # a data_came_in call, and it's possible (though not likely) that
        # there could be a torrent-specific exception during the same call.
        # Therefore read the context after the call.
        if socket is not None:
            context = socket.context
        if self.noisy and context is None:
            data = StringIO()
            print_exc(file=data)
            self.errorfunc(CRITICAL, data.getvalue())
        if context is not None:
            context.got_exception(e)
|
||||
|
||||
def _close_dead(self):
|
||||
while len(self.dead_from_write) > 0:
|
||||
old = self.dead_from_write
|
||||
self.dead_from_write = []
|
||||
for s in old:
|
||||
if s.socket is not None:
|
||||
self._close_socket(s)
|
||||
|
||||
def _close_socket(self, s):
|
||||
sock = s.socket.fileno()
|
||||
if self.config['close_with_rst']:
|
||||
s.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, NOLINGER)
|
||||
s.socket.close()
|
||||
self.poll.unregister(sock)
|
||||
del self.single_sockets[sock]
|
||||
s.socket = None
|
||||
self._make_wrapped_call(s.handler.connection_lost, (s,), s)
|
||||
s.handler = None
|
63
BitTorrent/RawServer_magic.py
Executable file
63
BitTorrent/RawServer_magic.py
Executable file
@ -0,0 +1,63 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Greg Hazel
|
||||
|
||||
# Mutable holder for backend-selection state shared across this module.
class BaseMagic:
    base = None        # the RawServer implementation currently selected
    too_late = False   # set once a backend has been used; switching then fails

# module-level singleton used by switch_rawserver and the proxies below
magic = BaseMagic()
|
||||
|
||||
from BitTorrent import BTFailure
|
||||
|
||||
# Prefer the twisted-based RawServer; fall back to the classic
# select/poll implementation when twisted is unavailable.
try:
    import RawServer_twisted
    magic.base = RawServer_twisted.RawServer
    Handler = RawServer_twisted.Handler
except ImportError:
    import RawServer
    magic.base = RawServer.RawServer
    Handler = RawServer.Handler
|
||||
|
||||
def switch_rawserver(choice):
    """Select the RawServer backend: 'twisted' (case-insensitive) or the
    classic implementation for anything else.

    Raises BTFailure if a backend has already been used.
    """
    if magic.too_late:
        raise BTFailure(_("Too late to switch RawServer backends, %s has already been used.") % str(magic.base))

    if choice.lower() == "twisted":
        import RawServer_twisted
        magic.base = RawServer_twisted.RawServer
    else:
        import RawServer
        magic.base = RawServer.RawServer
|
||||
|
||||
class _RawServerMetaclass:
    """Forwards class-attribute lookups to the currently selected backend
    class, marking the backend as used."""

    def __init__(self, *args):
        pass

    def __getattr__(self, name):
        # any class-attribute access counts as "using" the backend
        magic.too_late = True
        try:
            return getattr(magic.base, name)
        except:
            raise AttributeError(name)
|
||||
|
||||
class RawServer:
    """Proxy that instantiates whichever backend is selected at call time
    and delegates all attribute access to that instance."""
    __metaclass__ = _RawServerMetaclass

    def __init__(self, *args, **kwargs):
        magic.too_late = True
        self.instance = magic.base(*args, **kwargs)

    def __getattr__(self, name):
        try:
            return getattr(self.instance, name)
        except:
            raise AttributeError(name)
|
||||
|
621
BitTorrent/RawServer_twisted.py
Executable file
621
BitTorrent/RawServer_twisted.py
Executable file
@ -0,0 +1,621 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Greg Hazel
|
||||
|
||||
import os
|
||||
import sys
|
||||
import socket
|
||||
import signal
|
||||
import struct
|
||||
import thread
|
||||
from cStringIO import StringIO
|
||||
from traceback import print_exc, print_stack
|
||||
|
||||
from BitTorrent import BTFailure, WARNING, CRITICAL, FAQ_URL
|
||||
|
||||
noSignals = True

# Pick the best reactor available for this platform before the default
# select reactor gets installed.
if os.name == 'nt':
    try:
        from twisted.internet import iocpreactor
        iocpreactor.proactor.install()
        noSignals = False
    except:
        # just as limited (if not more) as select, and also (supposedly) buggy
        #try:
        #    from twisted.internet import win32eventreactor
        #    win32eventreactor.install()
        #except:
        #    pass
        pass
else:
    try:
        from twisted.internet import kqreactor
        kqreactor.install()
    except:
        try:
            from twisted.internet import pollreactor
            pollreactor.install()
        except:
            pass
|
||||
|
||||
#the default reactor is select-based, and will be install()ed if another has not
|
||||
from twisted.internet import reactor, task, error
|
||||
|
||||
# Refuse to run against twisted 1.x; the APIs used below require 2.0+.
import twisted.copyright
if int(twisted.copyright.version.split('.')[0]) < 2:
    raise ImportError("RawServer_twisted requires twisted 2.0.0 or greater")
|
||||
|
||||
from twisted.internet.protocol import DatagramProtocol, Protocol, Factory, ClientFactory
|
||||
from twisted.protocols.policies import TimeoutMixin
|
||||
|
||||
NOLINGER = struct.pack('ii', 1, 0)
|
||||
|
||||
class Handler(object):
    """Callback interface RawServer invokes on connection events.

    Subclass and override what you need; every default is a no-op except
    connection_started, which forwards to connection_made.
    """

    # there is only a semantic difference between "made" and "started".
    # I prefer "started"
    def connection_started(self, s):
        self.connection_made(s)

    def connection_made(self, s):
        pass

    def connection_lost(self, s):
        pass

    # Maybe connection_lost should just have a default 'None' exception parameter
    def connection_failed(self, addr, exception):
        pass

    def connection_flushed(self, s):
        pass

    def data_came_in(self, addr, datagram):
        pass
|
||||
|
||||
class ConnectionWrapper(object):
    """Adapts a twisted transport to the SingleSocket-style interface the
    rest of BitTorrent expects (write/is_flushed/shutdown/close)."""

    def __init__(self, rawserver, handler, context, tos=0):
        self.dying = 0
        self.ip = None
        self.port = None
        self.transport = None
        self.reset_timeout = None

        self.post_init(rawserver, handler, context)

        self.tos = tos

        self.buffer = OutputBuffer(self)

    def post_init(self, rawserver, handler, context):
        # split out of __init__ so UDP wrappers can be re-bound later
        self.rawserver = rawserver
        self.handler = handler
        self.context = context
        if self.rawserver:
            self.rawserver.single_sockets[self] = self

    def get_socket(self):
        sock = None
        try:
            sock = self.transport.getHandle()
        except:
            try:
                # iocpreactor doesn't implement ISystemHandle like it should
                sock = self.transport.socket
            except:
                pass
        return sock

    def attach_transport(self, transport, reset_timeout):
        self.transport = transport
        self.reset_timeout = reset_timeout

        try:
            address = self.transport.getPeer()
        except:
            try:
                # udp, for example
                address = self.transport.getHost()
            except:
                if "state" not in self.transport.__dict__:
                    self.transport.state = "NO STATE!"
                sys.stderr.write("UNKNOWN HOST/PEER: " + str(self.transport) + ":" + str(self.transport.state)+ ":" + str(self.handler) + "\n")
                print_stack()
                # fallback incase the unknown happens,
                # there's no use raising an exception
                address = ("unknown", -1)

        try:
            self.ip = address.host
            self.port = address.port
        except:
            #unix sockets, for example
            pass

        if self.tos != 0:
            sock = self.get_socket()
            # best-effort IP type-of-service
            try:
                sock.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, self.tos)
            except:
                pass

    def sendto(self, packet, flags, addr):
        # all this can go away once we pin down the bug
        if not hasattr(self.transport, "listening"):
            self.rawserver.errorfunc(WARNING, "UDP port never setup properly when asked to write")
        elif not self.transport.listening:
            self.rawserver.errorfunc(WARNING, "UDP port cleaned up already when asked to write")

        result = None
        try:
            result = self.transport.write(packet, addr)
        except Exception as e:
            self.rawserver.errorfunc(WARNING, "UDP sendto failed: %s" % str(e))

        return result

    def write(self, b):
        self.buffer.add(b)

    def _flushed(self):
        s = self
        if s.handler is not None:
            # calling flushed from inside the write path is bad form, so
            # deliver it from a fresh task instead
            self.rawserver.add_task(s.handler.connection_flushed, 0, (s,))

    def is_flushed(self):
        return self.buffer.is_flushed()

    def shutdown(self, how):
        if how == socket.SHUT_WR:
            self.transport.loseWriteConnection()
            self.buffer.stopWriting()
        elif how == socket.SHUT_RD:
            self.transport.stopListening()
        else:
            self.close()

    def close(self):
        self.buffer.stopWriting()

        # opt for no "connection_lost" callback since we know that
        self.dying = 1

        if self.rawserver.config['close_with_rst']:
            # abortive close: best-effort RST
            try:
                sock = self.get_socket()
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, NOLINGER)
            except:
                pass

        if self in self.rawserver.udp_sockets:
            # udp "connections" should only call stopListening
            self.transport.stopListening()
        else:
            self.transport.loseConnection()
|
||||
|
||||
|
||||
class OutputBuffer(object):
    """Write buffer that feeds a twisted transport through the
    producer/consumer API, registering itself only while data is pending."""

    def __init__(self, connection):
        self.connection = connection
        self.consumer = None
        self.buffer = StringIO()

    def is_flushed(self):
        # stream position is 0 only when nothing is pending
        return (self.buffer.tell() == 0)

    def add(self, b):
        # sometimes we get strings, sometimes we get buffers. ugg.
        if (isinstance(b, buffer)):
            b = str(b)
        self.buffer.write(b)

        if self.consumer is None:
            self.beginWriting()

    def beginWriting(self):
        self.stopWriting()
        self.consumer = self.connection.transport
        self.consumer.registerProducer(self, False)

    def stopWriting(self):
        if self.consumer is not None:
            self.consumer.unregisterProducer()
        self.consumer = None

    def resumeProducing(self):
        if self.consumer is not None:
            if self.buffer.tell() > 0:
                # hand everything pending to the transport and reset
                self.consumer.write(self.buffer.getvalue())
                self.buffer.seek(0)
                self.buffer.truncate(0)
                self.connection._flushed()
            else:
                self.stopWriting()

    def pauseProducing(self):
        pass

    def stopProducing(self):
        pass
|
||||
|
||||
class CallbackConnection(object):
    """Mixin translating twisted protocol callbacks into ConnectionWrapper /
    Handler callbacks."""

    def attachTransport(self, transport, connection, *args):
        s = connection
        if s is None:
            s = ConnectionWrapper(*args)

        s.attach_transport(transport, self.optionalResetTimeout)
        self.connection = s

    def connectionMade(self):
        s = self.connection
        s.handler.connection_started(s)
        self.optionalResetTimeout()

    def connectionLost(self, reason):
        reactor.callLater(0, self.post_connectionLost, reason)

    #twisted api inconsistancy workaround
    #sometimes connectionLost is called (not fired) from inside write()
    def post_connectionLost(self, reason):
        #hack to try and dig up the connection if one was ever made
        if "connection" not in self.__dict__:
            self.connection = self.factory.connection
        if self.connection is not None:
            self.factory.rawserver._remove_socket(self.connection)

    def dataReceived(self, data):
        self.optionalResetTimeout()

        s = self.connection
        s.rawserver._make_wrapped_call(s.handler.data_came_in,
                                       (s, data), s)

    def datagramReceived(self, data, addr):
        host, port = addr
        s = self.connection
        s.rawserver._make_wrapped_call(s.handler.data_came_in,
                                       ((host, port), data), s)

    def connectionRefused(self):
        s = self.connection
        dns = (s.ip, s.port)
        reason = "connection refused"

        if not s.dying:
            # this might not work - reason is not an exception
            s.handler.connection_failed(dns, reason)

            #so we don't get failed then closed
            s.dying = 1

        s.rawserver._remove_socket(s)

    def optionalResetTimeout(self):
        if self.can_timeout:
            self.resetTimeout()
||||
|
||||
class CallbackProtocol(CallbackConnection, TimeoutMixin, Protocol):
    """TCP protocol glue: wires a new transport to a ConnectionWrapper and
    arms the idle timeout."""

    def makeConnection(self, transport):
        self.can_timeout = 1
        self.setTimeout(self.factory.rawserver.config['socket_timeout'])
        self.attachTransport(transport, self.factory.connection, *self.factory.connection_args)
        Protocol.makeConnection(self, transport)
|
||||
|
||||
class CallbackDatagramProtocol(CallbackConnection, DatagramProtocol):
    """UDP protocol glue: no idle timeout, wrapper supplied up front."""

    def startProtocol(self):
        self.can_timeout = 0
        self.attachTransport(self.transport, self.connection, ())
        DatagramProtocol.startProtocol(self)

    def connectionRefused(self):
        # we don't use these at all for udp, so skip the CallbackConnection
        DatagramProtocol.connectionRefused(self)
|
||||
|
||||
class OutgoingConnectionFactory(ClientFactory):
    """Client factory that reports connect failures back through the
    BitTorrent handler interface."""

    def clientConnectionFailed(self, connector, reason):
        peer = connector.getDestination()
        dns = (peer.host, peer.port)
        # opt-out
        if not self.connection.dying:
            # this might not work - reason is not an exception
            self.connection.handler.connection_failed(dns, reason)

            #so we don't get failed then closed
            self.connection.dying = 1

        self.rawserver._remove_socket(self.connection)
|
||||
|
||||
def UnimplementedWarning(msg):
    # Deliberately silent -- the warning spam was judged unhelpful.
    #ok, I think people get the message
    #print "UnimplementedWarning: " + str(msg)
    pass
|
||||
|
||||
# Encoder calls stop_listening(socket) then socket.close(); to us those
# mean the same thing, so this stand-in swallows the second call.
class CloseSwallower:
    def close(self):
        pass
|
||||
|
||||
# Storage for socket-creation parameters, and a proxy onto the live
# connection once one exists.
class SocketProxy(object):
    def __init__(self, port, bind, reuse, tos, protocol):
        self.port = port
        self.bind = bind
        self.reuse = reuse
        self.tos = tos
        self.protocol = protocol
        self.connection = None

    def __getattr__(self, name):
        # anything we don't define is delegated to the wrapped connection
        try:
            return getattr(self.connection, name)
        except:
            raise AttributeError(name)
|
||||
|
||||
def default_error_handler(level, message):
    # Fallback errorfunc: dump the message to stdout; `level` is ignored.
    sys.stdout.write('%s\n' % (message,))
|
||||
|
||||
class RawServerMixin(object):
    """Shared plumbing for twisted-backed RawServer: config storage,
    wrapped-call exception routing, and SIGINT handling."""

    def __init__(self, doneflag, config, noisy=True,
                 errorfunc=default_error_handler, tos=0):
        self.doneflag = doneflag
        self.noisy = noisy
        self.errorfunc = errorfunc
        self.config = config
        self.tos = tos
        self.ident = thread.get_ident()

    def _make_wrapped_call(self, function, args, wrapper=None, context=None):
        """Call function(*args), routing any exception either to the owning
        context's got_exception or, with no context, to errorfunc."""
        try:
            function(*args)
        except KeyboardInterrupt:
            raise
        except Exception as e:  # hopefully nothing raises strings
            # Incoming sockets can be assigned to a particular torrent during
            # a data_came_in call, and it's possible (though not likely) that
            # there could be a torrent-specific exception during the same call.
            # Therefore read the context after the call.
            if wrapper is not None:
                context = wrapper.context
            if self.noisy and context is None:
                data = StringIO()
                print_exc(file=data)
                # bug fix: the original did data.seek(-1) then read(), which
                # relies on seek clamping a bogus offset and can drop the
                # traceback; getvalue() reads the whole buffer and matches
                # the classic RawServer's behavior.
                self.errorfunc(CRITICAL, data.getvalue())
            if context is not None:
                context.got_exception(e)

    # must be called from the main thread
    def install_sigint_handler(self):
        signal.signal(signal.SIGINT, self._handler)

    def _handler(self, signum, frame):
        self.external_add_task(self.doneflag.set, 0)
        # Allow pressing ctrl-c multiple times to raise KeyboardInterrupt,
        # in case the program is in an infinite loop
        signal.signal(signal.SIGINT, signal.default_int_handler)
|
||||
|
||||
class RawServer(RawServerMixin):
|
||||
|
||||
def __init__(self, doneflag, config, noisy=True,
             errorfunc=default_error_handler, tos=0):
    """Twisted-backed event loop; see RawServerMixin for shared state."""
    RawServerMixin.__init__(self, doneflag, config, noisy, errorfunc, tos)

    self.listening_handlers = {}
    self.single_sockets = {}
    self.udp_sockets = {}
    self.live_contexts = {None : 1}
    self.listened = 0
|
||||
|
||||
def add_context(self, context):
    # Register a context so tasks scheduled under it are allowed to run.
    self.live_contexts[context] = 1
|
||||
|
||||
def remove_context(self, context):
    # Pending reactor callbacks are pruned lazily by autoprune.
    del self.live_contexts[context]
|
||||
|
||||
def autoprune(self, f, *a, **kw):
    """Run f(*a, **kw) only if kw['context'] is still alive; silently
    drops tasks whose torrent has since been removed."""
    # `in` replaces the deprecated dict.has_key -- identical semantics
    if kw['context'] in self.live_contexts:
        f(*a, **kw)
|
||||
|
||||
def add_task(self, func, delay, args=(), context=None):
    """Schedule func(*args) on the reactor `delay` seconds from now.

    Must be called from the reactor thread; context liveness is
    re-checked by autoprune when the callback fires.
    """
    assert thread.get_ident() == self.ident
    # isinstance instead of type(...)== comparisons: same intent, idiomatic
    assert isinstance(args, (list, tuple))

    reactor.callLater(delay, self.autoprune, self._make_wrapped_call,
                      func, args, context=context)
|
||||
|
||||
def external_add_task(self, func, delay, args=(), context=None):
    """Thread-safe add_task: marshals the request onto the reactor thread."""
    # isinstance instead of type(...)== comparisons: same intent, idiomatic
    assert isinstance(args, (list, tuple))
    reactor.callFromThread(self.add_task, func, delay, args, context)
|
||||
|
||||
def create_unixserversocket(filename):
    """Create a SocketProxy listening on a UNIX-domain socket at *filename*."""
    s = SocketProxy(0, filename, True, 0, 'tcp')
    s.factory = Factory()

    if s.reuse == False:
        UnimplementedWarning("You asked for reuse to be off when binding. Sorry, I can't do that.")

    listening_port = reactor.listenUNIX(s.bind, s.factory)
    listening_port.listening = 1
    s.listening_port = listening_port

    return s
create_unixserversocket = staticmethod(create_unixserversocket)
|
||||
|
||||
def create_serversocket(port, bind='', reuse=False, tos=0):
|
||||
s = SocketProxy(port, bind, reuse, tos, 'tcp')
|
||||
s.factory = Factory()
|
||||
|
||||
if s.reuse == False:
|
||||
UnimplementedWarning("You asked for reuse to be off when binding. Sorry, I can't do that.")
|
||||
|
||||
try:
|
||||
listening_port = reactor.listenTCP(s.port, s.factory, interface=s.bind)
|
||||
except error.CannotListenError, e:
|
||||
if e[0] != 0:
|
||||
raise e.socketError
|
||||
else:
|
||||
raise
|
||||
listening_port.listening = 1
|
||||
s.listening_port = listening_port
|
||||
|
||||
return s
|
||||
create_serversocket = staticmethod(create_serversocket)
|
||||
|
||||
def create_udpsocket(port, bind='', reuse=False, tos=0):
|
||||
s = SocketProxy(port, bind, reuse, tos, 'udp')
|
||||
s.protocol = CallbackDatagramProtocol()
|
||||
c = ConnectionWrapper(None, None, None, tos)
|
||||
s.connection = c
|
||||
s.protocol.connection = c
|
||||
|
||||
if s.reuse == False:
|
||||
UnimplementedWarning("You asked for reuse to be off when binding. Sorry, I can't do that.")
|
||||
|
||||
try:
|
||||
listening_port = reactor.listenUDP(s.port, s.protocol, interface=s.bind)
|
||||
except error.CannotListenError, e:
|
||||
raise e.socketError
|
||||
listening_port.listening = 1
|
||||
s.listening_port = listening_port
|
||||
|
||||
return s
|
||||
create_udpsocket = staticmethod(create_udpsocket)
|
||||
|
||||
def start_listening(self, serversocket, handler, context=None):
    """Begin accepting TCP connections on a proxy from create_serversocket."""
    s = serversocket
    s.factory.rawserver = self
    s.factory.protocol = CallbackProtocol
    s.factory.connection = None
    s.factory.connection_args = (self, handler, context, serversocket.tos)

    if not s.listening_port.listening:
        s.listening_port.startListening()
        s.listening_port.listening = 1

    self.listening_handlers[s] = s.listening_port

    #provides a harmless close() method
    s.connection = CloseSwallower()
|
||||
|
||||
def start_listening_udp(self, serversocket, handler, context=None):
    """Begin receiving datagrams on a proxy from create_udpsocket."""
    s = serversocket

    # bind the pre-made wrapper to this rawserver/handler/context
    c = s.connection
    c.post_init(self, handler, context)

    if not s.listening_port.listening:
        s.listening_port.startListening()
        s.listening_port.listening = 1

    self.listening_handlers[serversocket] = s.listening_port

    self.udp_sockets[c] = c
|
||||
|
||||
def stop_listening(self, serversocket):
    """Stop a TCP server socket and forget its listening handler.

    NotListeningError is swallowed: stopping an already-stopped port is
    treated as a no-op, but the handler entry is removed either way.
    """
    listening_port = self.listening_handlers[serversocket]
    try:
        listening_port.stopListening()
        listening_port.listening = 0
    except error.NotListeningError:
        pass
    del self.listening_handlers[serversocket]
|
||||
|
||||
def stop_listening_udp(self, serversocket):
    """Stop a UDP socket and drop all bookkeeping references to it.

    NOTE(review): unlike stop_listening, this does not catch
    NotListeningError -- presumably UDP ports are never stopped twice;
    confirm against callers.
    """
    listening_port = self.listening_handlers[serversocket]
    listening_port.stopListening()
    del self.listening_handlers[serversocket]
    del self.udp_sockets[serversocket.connection]
    del self.single_sockets[serversocket.connection]
|
||||
|
||||
def start_connection(self, dns, handler, context=None, do_bind=True):
    """Open an outgoing TCP connection.

    @param dns: (address, port) tuple; port is coerced to int.
    @param handler: object receiving connection events via the factory.
    @param context: opaque context stored on the ConnectionWrapper.
    @param do_bind: when True, bind the local side to config['bind']
        (any ephemeral port) if that option is non-empty.
    @return: the ConnectionWrapper tracking this connection.
    """
    addr = dns[0]
    port = int(dns[1])

    bindaddr = None
    if do_bind:
        bindaddr = self.config['bind']
        # was "if bindaddr and len(bindaddr) >= 0:" -- the length test
        # is always true, so the condition reduces to truthiness
        if bindaddr:
            bindaddr = (bindaddr, 0)
        else:
            bindaddr = None

    factory = OutgoingConnectionFactory()
    factory.protocol = CallbackProtocol
    factory.rawserver = self

    c = ConnectionWrapper(self, handler, context, self.tos)

    factory.connection = c
    factory.connection_args = ()

    # twisted tracks the returned Connector itself; we only keep the
    # wrapper (the original bound it to an unused local)
    reactor.connectTCP(addr, port, factory, bindAddress=bindaddr)

    self.single_sockets[c] = c
    return c
|
||||
|
||||
def async_start_connection(self, dns, handler, context=None, do_bind=True):
    """Same as start_connection but discards the return value; with
    twisted every connection attempt is already asynchronous."""
    self.start_connection(dns, handler, context, do_bind)
|
||||
|
||||
def wrap_socket(self, sock, handler, context=None, ip=None):
    """Not supported by this twisted-based RawServer implementation."""
    raise Unimplemented("wrap_socket")
|
||||
|
||||
def listen_forever(self):
    """Run the twisted reactor until stopped.

    Records the calling thread's ident (used elsewhere for thread-affinity
    checks) and warns if called more than once, since a reactor cannot be
    restarted.
    """
    self.ident = thread.get_ident()
    if self.listened:
        UnimplementedWarning("listen_forever() should only be called once per reactor.")
    self.listened = 1

    # poll self.stop once a second so setting doneflag eventually
    # shuts the reactor down
    l = task.LoopingCall(self.stop)
    l.start(1, now = False)

    # noSignals is a module-level flag -- when set, keep twisted from
    # installing its own signal handlers
    if noSignals:
        reactor.run(installSignalHandlers=False)
    else:
        reactor.run()
|
||||
|
||||
def listen_once(self, period=1e9):
    """Run one reactor iteration (deprecated; see warning text)."""
    UnimplementedWarning("listen_once() Might not return until there is activity, and might not process the event you want. Use listen_forever().")
    reactor.iterate(period)
|
||||
|
||||
def stop(self):
    """Shut the reactor down once doneflag is set.

    Called periodically from the LoopingCall installed by
    listen_forever; does nothing until doneflag is set. Closing each
    tracked connection is best-effort -- a failing close must never
    prevent reactor teardown.
    """
    if self.doneflag.isSet():

        for connection in self.single_sockets.values():
            try:
                #I think this still sends all the data
                connection.close()
            except Exception:
                # best-effort: was a bare "except:", which also swallowed
                # SystemExit/KeyboardInterrupt; narrowed to Exception
                pass

        reactor.suggestThreadPoolSize(0)
        reactor.stop()
|
||||
|
||||
def _remove_socket(self, s):
    """Drop a ConnectionWrapper from tracking, notifying its handler
    unless the connection opted out by setting s.dying."""
    # opt-out
    if not s.dying:
        self._make_wrapped_call(s.handler.connection_lost, (s,), s)
    # break the reference cycle between wrapper and handler
    s.handler = None

    del self.single_sockets[s]
|
||||
|
291
BitTorrent/Rerequester.py
Executable file
291
BitTorrent/Rerequester.py
Executable file
@ -0,0 +1,291 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen, Uoti Urpala
|
||||
|
||||
from threading import Thread
|
||||
from socket import error, gethostbyname
|
||||
from random import random, randrange
|
||||
from binascii import b2a_hex
|
||||
|
||||
from BitTorrent import version
|
||||
from BitTorrent.platform import bttime
|
||||
from BitTorrent.zurllib import urlopen, quote, Request
|
||||
from BitTorrent.btformats import check_peers
|
||||
from BitTorrent.bencode import bencode, bdecode
|
||||
from BitTorrent import BTFailure, INFO, WARNING, ERROR, CRITICAL
|
||||
|
||||
|
||||
class Rerequester(object):
    """Periodically announces this torrent to its HTTP tracker and feeds
    returned peers to the connecter.

    begin() reschedules itself every 60 seconds and runs _check(), a
    state machine that decides whether an announce is due. The actual
    HTTP request runs in a worker thread (_rerequest); results re-enter
    the main thread via externalsched -> _postrequest.
    """

    def __init__(self, url, config, sched, howmany, connect, externalsched,
            amount_left, up, down, port, myid, infohash, errorfunc, doneflag,
            upratefunc, downratefunc, ever_got_incoming, diefunc, sfunc):
        self.baseurl = url
        self.infohash = infohash
        # peerid currently announced to the tracker; None until the first
        # announce, and reset when the wanted id changes (see _check)
        self.peerid = None
        self.wanted_peerid = myid
        self.port = port
        self.url = None
        self.config = config
        self.last = None
        self.trackerid = None
        # default announce interval; may be overridden by the tracker
        self.announce_interval = 30 * 60
        self.sched = sched
        # callables supplied by the caller: peer count, connect-to-peer,
        # schedule-on-main-thread, bytes left / uploaded / downloaded
        self.howmany = howmany
        self.connect = connect
        self.externalsched = externalsched
        self.amount_left = amount_left
        self.up = up
        self.down = down
        self.errorfunc = errorfunc
        self.doneflag = doneflag
        self.upratefunc = upratefunc
        self.downratefunc = downratefunc
        self.ever_got_incoming = ever_got_incoming
        self.diefunc = diefunc
        self.successfunc = sfunc
        # True when a 'completed' event should be sent on the next check
        self.finish = False
        # bttime() when an announce thread was started; None when idle
        self.current_started = None
        # back-off delay after a failed announce; None when healthy
        self.fail_wait = None
        self.last_time = None
        # up/down totals at the moment the peer id last changed, so
        # per-id counters restart from zero
        self.previous_down = 0
        self.previous_up = 0
        self.tracker_num_peers = None
        self.tracker_num_seeds = None

    def _makeurl(self, peerid, port):
        """Build the base announce URL including a random 4-byte key."""
        return ('%s?info_hash=%s&peer_id=%s&port=%s&key=%s' %
                (self.baseurl, quote(self.infohash), quote(peerid), str(port),
                 b2a_hex(''.join([chr(randrange(256)) for i in xrange(4)]))))

    def change_port(self, peerid, port):
        """Switch to a new peer id/port; forces a stop/re-start announce
        cycle on the next _check."""
        self.wanted_peerid = peerid
        self.port = port
        self.last = None
        self.trackerid = None
        self._check()

    def begin(self):
        """Start (and keep alive) the 60-second polling loop."""
        if self.sched:
            self.sched(self.begin, 60)
        self._check()

    def announce_finish(self):
        """Request a 'completed' event on the next check."""
        self.finish = True
        self._check()

    def announce_stop(self):
        """Send a 'stopped' event immediately."""
        self._announce(2)

    def _check(self):
        """Decide whether an announce is due and fire it.

        Priority order: in-flight announce (warn if stuck) > first
        announce ('started') > peer id change (stop, then restart) >
        'completed' event > failure back-off retry > regular interval /
        need-more-peers announce.
        """
        if self.current_started is not None:
            if self.current_started <= bttime() - 58:
                self.errorfunc(WARNING,
                               _("Tracker announce still not complete "
                                 "%d seconds after starting it") %
                               int(bttime() - self.current_started))
            return
        if self.peerid is None:
            self.peerid = self.wanted_peerid
            self.url = self._makeurl(self.peerid, self.port)
            self._announce(0)
            return
        if self.peerid != self.wanted_peerid:
            self._announce(2)
            self.peerid = None
            self.previous_up = self.up()
            self.previous_down = self.down()
            return
        if self.finish:
            self.finish = False
            self._announce(1)
            return
        if self.fail_wait is not None:
            if self.last_time + self.fail_wait <= bttime():
                self._announce()
            return
        if self.last_time > bttime() - self.config['rerequest_interval']:
            return
        # ask for more peers sooner if we have never received an
        # incoming connection (likely firewalled)
        if self.ever_got_incoming():
            getmore = self.howmany() <= self.config['min_peers'] / 3
        else:
            getmore = self.howmany() < self.config['min_peers']
        if getmore or bttime() - self.last_time > self.announce_interval:
            self._announce()

    def _announce(self, event=None):
        """Assemble the announce URL and fire the request in a thread.

        @param event: None for a plain announce, or an index into
            ['started', 'completed', 'stopped'].
        """
        self.current_started = bttime()
        s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
             (self.url, str(self.up() - self.previous_up),
              str(self.down() - self.previous_down), str(self.amount_left())))
        if self.last is not None:
            s += '&last=' + quote(str(self.last))
        if self.trackerid is not None:
            s += '&trackerid=' + quote(str(self.trackerid))
        if self.howmany() >= self.config['max_initiate']:
            s += '&numwant=0'
        else:
            s += '&compact=1'
        if event is not None:
            s += '&event=' + ['started', 'completed', 'stopped'][event]
        Thread(target=self._rerequest, args=[s, self.peerid]).start()

    # Must destroy all references that could cause reference circles
    def cleanup(self):
        self.sched = None
        self.howmany = None
        self.connect = None
        self.externalsched = lambda *args: None
        self.amount_left = None
        self.up = None
        self.down = None
        self.errorfunc = None
        self.upratefunc = None
        self.downratefunc = None
        self.ever_got_incoming = None
        self.diefunc = None
        self.successfunc = None

    def _rerequest(self, url, peerid):
        """Worker-thread body: perform the HTTP announce, then hand the
        result (or an error message) back to the main thread."""
        if self.config['ip']:
            try:
                url += '&ip=' + gethostbyname(self.config['ip'])
            except Exception, e:
                self.errorfunc(WARNING, _("Problem connecting to tracker, gethostbyname failed - ") + str(e))
        request = Request(url)
        request.add_header('User-Agent', 'BitTorrent/' + version)
        if self.config['tracker_proxy']:
            request.set_proxy(self.config['tracker_proxy'], 'http')
        try:
            h = urlopen(request)
            data = h.read()
            h.close()
        # urllib2 can raise various crap that doesn't have a common base
        # exception class especially when proxies are used, at least
        # ValueError and stuff from httplib
        except Exception, e:
            # bind the message as a default arg so the closure captures
            # the current value, not a later one
            def f(r=_("Problem connecting to tracker - ") + str(e)):
                self._postrequest(errormsg=r, peerid=peerid)
        else:
            def f():
                self._postrequest(data, peerid=peerid)
        self.externalsched(f, 0)

    def _fail(self):
        """Grow the exponential back-off delay after a failed announce."""
        if self.fail_wait is None:
            self.fail_wait = 50
        else:
            # multiply by a randomized factor in [1.4, 1.6)
            self.fail_wait *= 1.4 + random() * .2
        self.fail_wait = min(self.fail_wait,
                             self.config['max_announce_retry_interval'])

    def _postrequest(self, data=None, errormsg=None, peerid=None):
        """Main-thread handler for an announce result.

        Parses the bencoded tracker response, updates intervals and
        seed/peer counts, connects to any returned peers (compact or
        dict form), and retriggers _check.
        """
        self.current_started = None
        self.last_time = bttime()
        if errormsg is not None:
            self.errorfunc(WARNING, errormsg)
            self._fail()
            return
        try:
            r = bdecode(data)
            check_peers(r)
        except BTFailure, e:
            if data != '':
                self.errorfunc(ERROR, _("bad data from tracker - ") + str(e))
            self._fail()
            return
        if type(r.get('complete')) in (int, long) and \
           type(r.get('incomplete')) in (int, long):
            self.tracker_num_seeds = r['complete']
            self.tracker_num_peers = r['incomplete']
        else:
            self.tracker_num_seeds = self.tracker_num_peers = None
        if r.has_key('failure reason'):
            if self.howmany() > 0:
                self.errorfunc(ERROR, _("rejected by tracker - ") +
                               r['failure reason'])
            else:
                # sched shouldn't be strictly necessary
                def die():
                    self.diefunc(CRITICAL,
                        _("Aborting the torrent as it was rejected by "
                          "the tracker while not connected to any peers. ") +
                        _(" Message from the tracker: ") + r['failure reason'])
                self.sched(die, 0)
            self._fail()
        else:
            self.fail_wait = None
            if r.has_key('warning message'):
                self.errorfunc(ERROR, _("warning from tracker - ") +
                               r['warning message'])
            self.announce_interval = r.get('interval', self.announce_interval)
            self.config['rerequest_interval'] = r.get('min interval',
                self.config['rerequest_interval'])
            self.trackerid = r.get('tracker id', self.trackerid)
            self.last = r.get('last')
            p = r['peers']
            peers = []
            if type(p) == str:
                # compact format: 6 bytes per peer (4 ip + 2 port)
                for x in xrange(0, len(p), 6):
                    ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
                    port = (ord(p[x+4]) << 8) | ord(p[x+5])
                    peers.append((ip, port, None))
            else:
                for x in p:
                    peers.append((x['ip'], x['port'], x.get('peer id')))
            ps = len(peers) + self.howmany()
            if ps < self.config['max_initiate']:
                # clear 'last' when the tracker appears to have many more
                # peers than we were given, so the next request gets a
                # fresh batch
                if self.doneflag.isSet():
                    if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
                        self.last = None
                else:
                    if r.get('num peers', 1000) > ps * 1.2:
                        self.last = None
            for x in peers:
                self.connect((x[0], x[1]), x[2])
            if peerid == self.wanted_peerid:
                self.successfunc()
            self._check()
|
||||
|
||||
class DHTRerequester(Rerequester):
    """Rerequester variant that announces via the DHT instead of an HTTP
    tracker; reuses the base-class state machine by faking a tracker
    response in bencoded form."""

    def __init__(self, config, sched, howmany, connect, externalsched,
            amount_left, up, down, port, myid, infohash, errorfunc, doneflag,
            upratefunc, downratefunc, ever_got_incoming, diefunc, sfunc, dht):
        self.dht = dht
        # dummy URL keeps the base class happy; it is never fetched
        Rerequester.__init__(self, "http://localhost/announce", config, sched, howmany, connect, externalsched,
            amount_left, up, down, port, myid, infohash, errorfunc, doneflag,
            upratefunc, downratefunc, ever_got_incoming, diefunc, sfunc)

    def _announce(self, event=None):
        self.current_started = bttime()
        self._rerequest("", self.peerid)

    def _rerequest(self, url, peerid):
        # NOTE(review): self.peers appears unused within this class --
        # possibly vestigial; verify against the rest of the file
        self.peers = ""
        try:
            self.dht.getPeersAndAnnounce(self.infohash, self.port, self._got_peers)
        except Exception, e:
            self._postrequest(errormsg="Trackerless lookup failed: " + str(e), peerid=self.wanted_peerid)

    def _got_peers(self, peers):
        # self.howmany is set to None by cleanup(); bail out if torn down
        if not self.howmany:
            return
        if not peers:
            self._postrequest(bencode({'peers':''}), peerid=self.wanted_peerid)
        else:
            self._postrequest(bencode({'peers':peers[0]}), peerid=None)

    def _announced_peers(self, nodes):
        pass

    def announce_stop(self):
        # don't do anything
        pass
|
273
BitTorrent/Storage.py
Executable file
273
BitTorrent/Storage.py
Executable file
@ -0,0 +1,273 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen
|
||||
|
||||
import os
|
||||
from bisect import bisect_right
|
||||
from array import array
|
||||
|
||||
from BitTorrent.obsoletepythonsupport import *
|
||||
|
||||
from BitTorrent import BTFailure
|
||||
|
||||
|
||||
class FilePool(object):
    """Shared pool of open file handles across all running torrents.

    handles maps filename -> open handle, whandles marks which of those
    are writable, allfiles maps filename -> owning torrent.  When more
    files exist than max_files_open, handlebuffer is an LRU list of
    currently open filenames; otherwise it is None (no eviction needed).
    """

    def __init__(self, max_files_open):
        self.allfiles = {}
        self.handlebuffer = None
        self.handles = {}
        self.whandles = {}
        self.set_max_files_open(max_files_open)

    def close_all(self):
        """Close every pooled handle; report close failures to the
        owning torrent objects after the pool is cleared."""
        failures = {}
        for filename, handle in self.handles.iteritems():
            try:
                handle.close()
            except Exception, e:
                failures[self.allfiles[filename]] = e
        self.handles.clear()
        self.whandles.clear()
        if self.handlebuffer is not None:
            del self.handlebuffer[:]
        for torrent, e in failures.iteritems():
            torrent.got_exception(e)

    def set_max_files_open(self, max_files_open):
        """Change the handle limit (<= 0 means effectively unlimited)
        and rebuild the LRU state; closes all handles as a side effect."""
        if max_files_open <= 0:
            max_files_open = 1e100
        self.max_files_open = max_files_open
        self.close_all()
        if len(self.allfiles) > self.max_files_open:
            self.handlebuffer = []
        else:
            self.handlebuffer = None

    def add_files(self, files, torrent):
        """Register *files* as owned by *torrent*; rejects any file that
        already belongs to another running torrent."""
        for filename in files:
            if filename in self.allfiles:
                raise BTFailure(_("File %s belongs to another running torrent")
                                % filename)
        for filename in files:
            self.allfiles[filename] = torrent
        # start LRU tracking once the limit could be exceeded
        if self.handlebuffer is None and \
               len(self.allfiles) > self.max_files_open:
            self.handlebuffer = self.handles.keys()

    def remove_files(self, files):
        """Unregister *files*; drops LRU tracking when it is no longer
        needed."""
        for filename in files:
            del self.allfiles[filename]
        if self.handlebuffer is not None and \
               len(self.allfiles) <= self.max_files_open:
            self.handlebuffer = None
|
||||
|
||||
|
||||
# Make this a separate function because having this code in Storage.__init__()
|
||||
# would make python print a SyntaxWarning (uses builtin 'file' before 'global')
|
||||
|
||||
def bad_libc_workaround():
    """Replace this module's global `file` with a wrapper that ignores
    the buffering argument and always uses the default-buffered open().

    Affects every torrent instance using this module (see the comment at
    the call site in Storage.__init__).
    """
    global file

    def _unbuffered_arg_file(name, mode='r', buffering=None):
        # same signature as the builtin, but buffering is dropped
        return open(name, mode)

    file = _unbuffered_arg_file
|
||||
|
||||
class Storage(object):
|
||||
|
||||
def __init__(self, config, filepool, files, check_only=False):
|
||||
self.filepool = filepool
|
||||
self.config = config
|
||||
self.ranges = []
|
||||
self.myfiles = {}
|
||||
self.tops = {}
|
||||
self.undownloaded = {}
|
||||
self.unallocated = {}
|
||||
total = 0
|
||||
for filename, length in files:
|
||||
self.unallocated[filename] = length
|
||||
self.undownloaded[filename] = length
|
||||
if length > 0:
|
||||
self.ranges.append((total, total + length, filename))
|
||||
self.myfiles[filename] = None
|
||||
total += length
|
||||
if os.path.exists(filename):
|
||||
if not os.path.isfile(filename):
|
||||
raise BTFailure(_("File %s already exists, but is not a "
|
||||
"regular file") % filename)
|
||||
l = os.path.getsize(filename)
|
||||
if l > length and not check_only:
|
||||
h = file(filename, 'rb+')
|
||||
h.truncate(length)
|
||||
h.close()
|
||||
l = length
|
||||
self.tops[filename] = l
|
||||
elif not check_only:
|
||||
f = os.path.split(filename)[0]
|
||||
if f != '' and not os.path.exists(f):
|
||||
os.makedirs(f)
|
||||
file(filename, 'wb').close()
|
||||
self.begins = [i[0] for i in self.ranges]
|
||||
self.total_length = total
|
||||
if check_only:
|
||||
return
|
||||
self.handles = filepool.handles
|
||||
self.whandles = filepool.whandles
|
||||
|
||||
# Rather implement this as an ugly hack here than change all the
|
||||
# individual calls. Affects all torrent instances using this module.
|
||||
if config['bad_libc_workaround']:
|
||||
bad_libc_workaround()
|
||||
|
||||
def was_preallocated(self, pos, length):
|
||||
for filename, begin, end in self._intervals(pos, length):
|
||||
if self.tops.get(filename, 0) < end:
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_total_length(self):
|
||||
return self.total_length
|
||||
|
||||
def _intervals(self, pos, amount):
|
||||
r = []
|
||||
stop = pos + amount
|
||||
p = bisect_right(self.begins, pos) - 1
|
||||
while p < len(self.ranges) and self.ranges[p][0] < stop:
|
||||
begin, end, filename = self.ranges[p]
|
||||
r.append((filename, max(pos, begin) - begin, min(end, stop) - begin))
|
||||
p += 1
|
||||
return r
|
||||
|
||||
def _get_file_handle(self, filename, for_write):
|
||||
handlebuffer = self.filepool.handlebuffer
|
||||
if filename in self.handles:
|
||||
if for_write and filename not in self.whandles:
|
||||
self.handles[filename].close()
|
||||
self.handles[filename] = file(filename, 'rb+', 0)
|
||||
self.whandles[filename] = None
|
||||
if handlebuffer is not None and handlebuffer[-1] != filename:
|
||||
handlebuffer.remove(filename)
|
||||
handlebuffer.append(filename)
|
||||
else:
|
||||
if for_write:
|
||||
self.handles[filename] = file(filename, 'rb+', 0)
|
||||
self.whandles[filename] = None
|
||||
else:
|
||||
self.handles[filename] = file(filename, 'rb', 0)
|
||||
if handlebuffer is not None:
|
||||
if len(handlebuffer) >= self.filepool.max_files_open:
|
||||
oldfile = handlebuffer.pop(0)
|
||||
if oldfile in self.whandles: # .pop() in python 2.3
|
||||
del self.whandles[oldfile]
|
||||
self.handles[oldfile].close()
|
||||
del self.handles[oldfile]
|
||||
handlebuffer.append(filename)
|
||||
return self.handles[filename]
|
||||
|
||||
def read(self, pos, amount):
|
||||
r = []
|
||||
for filename, pos, end in self._intervals(pos, amount):
|
||||
h = self._get_file_handle(filename, False)
|
||||
h.seek(pos)
|
||||
r.append(h.read(end - pos))
|
||||
r = ''.join(r)
|
||||
if len(r) != amount:
|
||||
raise BTFailure(_("Short read - something truncated files?"))
|
||||
return r
|
||||
|
||||
def write(self, pos, s):
|
||||
# might raise an IOError
|
||||
total = 0
|
||||
for filename, begin, end in self._intervals(pos, len(s)):
|
||||
h = self._get_file_handle(filename, True)
|
||||
h.seek(begin)
|
||||
h.write(s[total: total + end - begin])
|
||||
total += end - begin
|
||||
|
||||
def close(self):
|
||||
error = None
|
||||
for filename in self.handles.keys():
|
||||
if filename in self.myfiles:
|
||||
try:
|
||||
self.handles[filename].close()
|
||||
except Exception, e:
|
||||
error = e
|
||||
del self.handles[filename]
|
||||
if filename in self.whandles:
|
||||
del self.whandles[filename]
|
||||
handlebuffer = self.filepool.handlebuffer
|
||||
if handlebuffer is not None:
|
||||
handlebuffer = [f for f in handlebuffer if f not in self.myfiles]
|
||||
self.filepool.handlebuffer = handlebuffer
|
||||
if error is not None:
|
||||
raise error
|
||||
|
||||
def write_fastresume(self, resumefile, amount_done):
|
||||
resumefile.write('BitTorrent resume state file, version 1\n')
|
||||
resumefile.write(str(amount_done) + '\n')
|
||||
for _, _, filename in self.ranges:
|
||||
resumefile.write(str(os.path.getsize(filename)) + ' ' +
|
||||
str(os.path.getmtime(filename)) + '\n')
|
||||
|
||||
def check_fastresume(self, resumefile, return_filelist=False,
|
||||
piece_size=None, numpieces=None, allfiles=None):
|
||||
filenames = [name for _, _, name in self.ranges]
|
||||
if resumefile is not None:
|
||||
version = resumefile.readline()
|
||||
if version != 'BitTorrent resume state file, version 1\n':
|
||||
raise BTFailure(_("Unsupported fastresume file format, "
|
||||
"maybe from another client version?"))
|
||||
amount_done = int(resumefile.readline())
|
||||
else:
|
||||
amount_done = size = mtime = 0
|
||||
for filename in filenames:
|
||||
if resumefile is not None:
|
||||
line = resumefile.readline()
|
||||
size, mtime = line.split()[:2] # allow adding extra fields
|
||||
size = int(size)
|
||||
mtime = int(mtime)
|
||||
if os.path.exists(filename):
|
||||
fsize = os.path.getsize(filename)
|
||||
else:
|
||||
raise BTFailure(_("Another program appears to have moved, renamed, or deleted the file."))
|
||||
if fsize > 0 and mtime != os.path.getmtime(filename):
|
||||
raise BTFailure(_("Another program appears to have modified the file."))
|
||||
if size != fsize:
|
||||
raise BTFailure(_("Another program appears to have changed the file size."))
|
||||
if not return_filelist:
|
||||
return amount_done
|
||||
if resumefile is None:
|
||||
return None
|
||||
if numpieces < 32768:
|
||||
typecode = 'h'
|
||||
else:
|
||||
typecode = 'l'
|
||||
try:
|
||||
r = array(typecode)
|
||||
r.fromfile(resumefile, numpieces)
|
||||
except Exception, e:
|
||||
raise BTFailure(_("Couldn't read fastresume data: ") + str(e) + '.')
|
||||
for i in range(numpieces):
|
||||
if r[i] >= 0:
|
||||
# last piece goes "past the end", doesn't matter
|
||||
self.downloaded(r[i] * piece_size, piece_size)
|
||||
if r[i] != -2:
|
||||
self.allocated(i * piece_size, piece_size)
|
||||
undl = self.undownloaded
|
||||
unal = self.unallocated
|
||||
return amount_done, [undl[x] for x in allfiles], \
|
||||
[not unal[x] for x in allfiles]
|
||||
|
||||
def allocated(self, pos, length):
|
||||
for filename, begin, end in self._intervals(pos, length):
|
||||
self.unallocated[filename] -= end - begin
|
||||
|
||||
def downloaded(self, pos, length):
|
||||
for filename, begin, end in self._intervals(pos, length):
|
||||
self.undownloaded[filename] -= end - begin
|
408
BitTorrent/StorageWrapper.py
Executable file
408
BitTorrent/StorageWrapper.py
Executable file
@ -0,0 +1,408 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen
|
||||
|
||||
from __future__ import division
|
||||
|
||||
from sha import sha
|
||||
from array import array
|
||||
from binascii import b2a_hex
|
||||
|
||||
from BitTorrent.bitfield import Bitfield
|
||||
from BitTorrent import BTFailure, INFO, WARNING, ERROR, CRITICAL
|
||||
|
||||
def toint(s):
    """Decode the byte string *s* as a big-endian unsigned integer."""
    hex_digits = b2a_hex(s)
    return int(hex_digits, 16)
|
||||
|
||||
def tobinary(i):
    """Encode integer *i* as a 4-character big-endian string.

    Inverse of toint() for values below 2**32.  The top byte is left
    unmasked, matching the original implementation.
    """
    pieces = [chr(i >> 24),
              chr((i >> 16) & 0xFF),
              chr((i >> 8) & 0xFF),
              chr(i & 0xFF)]
    return ''.join(pieces)
|
||||
|
||||
# Sentinel for self.places (piece -> disk slot): piece has no slot yet.
NO_PLACE = -1

# Sentinels for self.rplaces (disk slot -> piece); non-negative values
# are piece indices.
ALLOCATED = -1            # slot exists on disk but holds no known piece
UNALLOCATED = -2          # slot has not been allocated on disk yet
FASTRESUME_PARTIAL = -3   # slot holds a marked, partially downloaded piece
|
||||
|
||||
class StorageWrapper(object):
|
||||
|
||||
def __init__(self, storage, config, hashes, piece_size, finished,
        statusfunc, flag, data_flunked, infohash, errorfunc, resumefile):
    """Build piece-level bookkeeping on top of a Storage object.

    Validates the total length against the piece count, loads (or
    rebuilds) the places/rplaces piece<->slot maps from fastresume data,
    and -- when no valid fastresume exists -- hash-checks every
    preallocated piece on disk, recognizing pieces stored at the wrong
    position and partial pieces marked with self.partial_mark.

    *flag* is checked during the scan so a shutdown can abort it;
    *statusfunc* receives progress updates.
    """
    self.numpieces = len(hashes)
    self.storage = storage
    self.config = config
    check_hashes = config['check_hashes']
    self.hashes = hashes
    self.piece_size = piece_size
    self.data_flunked = data_flunked
    self.errorfunc = errorfunc
    self.total_length = storage.get_total_length()
    self.amount_left = self.total_length
    # marker written into unfinished slots so partial downloads can be
    # recognized on restart; includes the infohash and slice size
    self.partial_mark = "BitTorrent - this part has not been "+\
                        "downloaded yet."+infohash+\
                        tobinary(config['download_slice_size'])
    if self.total_length <= piece_size * (self.numpieces - 1):
        raise BTFailure, _("bad data in responsefile - total too small")
    if self.total_length > piece_size * self.numpieces:
        raise BTFailure, _("bad data in responsefile - total too big")
    self.finished = finished
    self.numactive = array('H', [0] * self.numpieces)
    # per piece: 1 = no requests generated yet, None = complete,
    # list = outstanding (offset, length) requests
    self.inactive_requests = [1] * self.numpieces
    self.amount_inactive = self.total_length
    self.endgame = False
    self.have = Bitfield(self.numpieces)
    self.waschecked = Bitfield(self.numpieces)
    # piece indices fit in a signed short only below 32768
    if self.numpieces < 32768:
        typecode = 'h'
    else:
        typecode = 'l'
    self.places = array(typecode, [NO_PLACE] * self.numpieces)
    if not check_hashes:
        # trust the data blindly: identity mapping, skip the scan
        self.rplaces = array(typecode, range(self.numpieces))
        fastresume = True
    else:
        self.rplaces = self._load_fastresume(resumefile, typecode)
        if self.rplaces is not None:
            fastresume = True
        else:
            self.rplaces = array(typecode, [UNALLOCATED] * self.numpieces)
            fastresume = False
    self.holepos = 0
    self.stat_numfound = 0
    self.stat_numflunked = 0
    self.stat_numdownloaded = 0
    self.stat_active = {}
    self.stat_new = {}
    self.stat_dirty = {}
    self.download_history = {}
    self.failed_pieces = {}

    if self.numpieces == 0:
        return
    targets = {}
    total = 0
    if not fastresume:
        for i in xrange(self.numpieces):
            if self._waspre(i):
                self.rplaces[i] = ALLOCATED
                total += 1
            else:
                # remember the expected hash of each missing piece so a
                # piece found at the wrong slot can be claimed for it
                targets[hashes[i]] = i
    if total and check_hashes:
        statusfunc(_("checking existing file"), 0)
    def markgot(piece, pos):
        # record that *piece* was found in disk slot *pos*
        if self.have[piece]:
            if piece != pos:
                return
            self.rplaces[self.places[pos]] = ALLOCATED
            self.places[pos] = self.rplaces[pos] = pos
            return
        self.places[piece] = pos
        self.rplaces[pos] = piece
        self.have[piece] = True
        self.amount_left -= self._piecelen(piece)
        self.amount_inactive -= self._piecelen(piece)
        self.inactive_requests[piece] = None
        if not fastresume:
            self.waschecked[piece] = True
        self.stat_numfound += 1
    lastlen = self._piecelen(self.numpieces - 1)
    partials = {}
    for i in xrange(self.numpieces):
        if not self._waspre(i):
            if self.rplaces[i] != UNALLOCATED:
                raise BTFailure(_("--check_hashes 0 or fastresume info "
                                  "doesn't match file state (missing data)"))
            continue
        elif fastresume:
            t = self.rplaces[i]
            if t >= 0:
                markgot(t, i)
                continue
            if t == UNALLOCATED:
                raise BTFailure(_("Bad fastresume info (files contain more "
                                  "data)"))
            if t == ALLOCATED:
                continue
            if t!= FASTRESUME_PARTIAL:
                raise BTFailure(_("Bad fastresume info (illegal value)"))
            data = self.storage.read(self.piece_size * i,
                                     self._piecelen(i))
            self._check_partial(i, partials, data)
            self.rplaces[i] = ALLOCATED
        else:
            data = self.storage.read(piece_size * i, self._piecelen(i))
            # hash the prefix of last-piece length separately (sp) so a
            # misplaced final piece can be detected inside this slot
            sh = sha(buffer(data, 0, lastlen))
            sp = sh.digest()
            sh.update(buffer(data, lastlen))
            s = sh.digest()
            if s == hashes[i]:
                markgot(i, i)
            elif s in targets and self._piecelen(i) == self._piecelen(targets[s]):
                markgot(targets[s], i)
            elif not self.have[self.numpieces - 1] and sp == hashes[-1] and (i == self.numpieces - 1 or not self._waspre(self.numpieces - 1)):
                markgot(self.numpieces - 1, i)
            else:
                self._check_partial(i, partials, data)
            statusfunc(fractionDone = 1 - self.amount_left /
                       self.total_length)
        if flag.isSet():
            # shutdown requested; abandon the scan
            return
    self.amount_left_with_partials = self.amount_left
    for piece in partials:
        if self.places[piece] < 0:
            pos = partials[piece][0]
            self.places[piece] = pos
            self.rplaces[pos] = piece
            self._make_partial(piece, partials[piece][1])
    for i in xrange(self.numpieces):
        if self.rplaces[i] != UNALLOCATED:
            self.storage.allocated(piece_size * i, self._piecelen(i))
        if self.have[i]:
            self.storage.downloaded(piece_size * i, self._piecelen(i))
|
||||
|
||||
def _waspre(self, piece):
    """Return whether the disk slot for *piece* was fully preallocated."""
    start = piece * self.piece_size
    return self.storage.was_preallocated(start, self._piecelen(piece))
|
||||
|
||||
def _piecelen(self, piece):
    """Length of *piece* in bytes; only the final piece may be short."""
    if piece < self.numpieces - 1:
        return self.piece_size
    # final piece: whatever remains of the total length
    return self.total_length - piece * self.piece_size
|
||||
|
||||
def _check_partial(self, pos, partials, data):
    """Scan slot *pos* for partial-download marker slices.

    Each still-undownloaded slice of a partial piece starts with
    self.partial_mark followed by the 4-byte piece index (see
    _initalloc).  If all markers agree on one piece index and at least
    one slice lacks the marker (i.e. holds real data), record
    partials[index] = (pos, marked_slice_offsets).  Mixed or
    out-of-range indices mean the slot is garbage -- bail out silently.
    """
    index = None
    missing = False
    marklen = len(self.partial_mark)+4
    for i in xrange(0, len(data) - marklen,
                    self.config['download_slice_size']):
        if data[i:i+marklen-4] == self.partial_mark:
            ind = toint(data[i+marklen-4:i+marklen])
            if index is None:
                index = ind
                parts = []
            if ind >= self.numpieces or ind != index:
                return
            parts.append(i)
        else:
            missing = True
    if index is not None and missing:
        # the final slice is too short to hold a marker; treat it as
        # still-missing if it exists
        i += self.config['download_slice_size']
        if i < len(data):
            parts.append(i)
        partials[index] = (pos, parts)
|
||||
|
||||
def _make_partial(self, index, parts):
    """Rebuild request bookkeeping for a partially downloaded piece.

    *parts* lists the slice offsets still missing (marker present); the
    remaining slices are counted as already downloaded and recorded in
    download_history so they get re-verified on completion.
    """
    length = self._piecelen(index)
    l = []
    self.inactive_requests[index] = l
    x = 0
    self.amount_left_with_partials -= self._piecelen(index)
    self.download_history[index] = {}
    request_size = self.config['download_slice_size']
    for x in xrange(0, self._piecelen(index), request_size):
        partlen = min(request_size, length - x)
        if x in parts:
            # still missing: becomes an inactive request, and counts
            # back into the with-partials total
            l.append((x, partlen))
            self.amount_left_with_partials += partlen
        else:
            # already on disk from a previous session
            self.amount_inactive -= partlen
            self.download_history[index][x] = None
    self.stat_dirty[index] = 1
|
||||
|
||||
def _initalloc(self, pos, piece):
    """Claim free disk slot *pos* for *piece* and fill it with the
    partial-download marker pattern so _check_partial can recognize it
    after a restart."""
    assert self.rplaces[pos] < 0
    assert self.places[piece] == NO_PLACE
    p = self.piece_size * pos
    length = self._piecelen(pos)
    if self.rplaces[pos] == UNALLOCATED:
        self.storage.allocated(p, length)
    self.places[piece] = pos
    self.rplaces[pos] = piece
    # "if self.rplaces[pos] != ALLOCATED:" to skip extra mark writes
    # one marker (mark + piece index, padded with 0xff) per slice,
    # repeated to cover the whole slot, then truncated to length
    mark = self.partial_mark + tobinary(piece)
    mark += chr(0xff) * (self.config['download_slice_size'] - len(mark))
    mark *= (length - 1) // len(mark) + 1
    self.storage.write(p, buffer(mark, 0, length))
|
||||
|
||||
def _move_piece(self, oldpos, newpos):
    """Relocate the piece stored in slot `oldpos` to slot `newpos`,
    updating both mapping tables, and hash-verify the moved data if the
    piece is supposed to be complete.
    """
    assert self.rplaces[newpos] < 0
    assert self.rplaces[oldpos] >= 0
    # NOTE(review): read length is _piecelen(newpos), not oldpos —
    # presumably safe because only the last piece is short; confirm.
    data = self.storage.read(self.piece_size * oldpos,
                             self._piecelen(newpos))
    self.storage.write(self.piece_size * newpos, data)
    if self.rplaces[newpos] == UNALLOCATED:
        self.storage.allocated(self.piece_size * newpos, len(data))
    piece = self.rplaces[oldpos]
    self.places[piece] = newpos
    self.rplaces[oldpos] = ALLOCATED
    self.rplaces[newpos] = piece
    if not self.have[piece]:
        return
    # completed piece: re-check the hash to catch on-disk corruption
    data = data[:self._piecelen(piece)]
    if sha(data).digest() != self.hashes[piece]:
        raise BTFailure(_("data corrupted on disk - "
                          "maybe you have two copies running?"))
|
||||
|
||||
def _get_free_place(self):
    """Advance the hole cursor to the first disk slot that holds no
    piece data (rplaces < 0) and return it."""
    cursor = self.holepos
    while self.rplaces[cursor] >= 0:
        cursor += 1
    self.holepos = cursor
    return cursor
|
||||
|
||||
def get_amount_left(self):
    """Return the number of bytes still needed to finish the download."""
    return self.amount_left
|
||||
|
||||
def do_I_have_anything(self):
    """True once at least one byte of the torrent has been obtained."""
    return self.amount_left < self.total_length
|
||||
|
||||
def _make_inactive(self, index):
    """Rebuild the inactive-request list for piece `index`: one
    (begin, length) entry per download_slice_size bytes, with a final
    shorter entry covering the remainder."""
    piece_length = self._piecelen(index)
    chunk = self.config['download_slice_size']
    requests = []
    begin = 0
    while piece_length - begin > chunk:
        requests.append((begin, chunk))
        begin += chunk
    requests.append((begin, piece_length - begin))
    self.inactive_requests[index] = requests
|
||||
|
||||
def _load_fastresume(self, resumefile, typecode):
    """Read numpieces entries of the given array typecode from the open
    fastresume file.  Returns the array, or None when there is no file
    or it cannot be parsed (a warning is reported, not raised).
    """
    if resumefile is not None:
        try:
            r = array(typecode)
            r.fromfile(resumefile, self.numpieces)
            return r
        except Exception, e:
            self.errorfunc(WARNING, _("Couldn't read fastresume data: ") +
                           str(e))
    return None
|
||||
|
||||
def write_fastresume(self, resumefile):
    """Serialize the slot->piece table to the open resume file, first
    downgrading slots whose piece is still incomplete to
    FASTRESUME_PARTIAL so they get rescanned on restart."""
    for slot in xrange(self.numpieces):
        occupant = self.rplaces[slot]
        if occupant >= 0 and not self.have[occupant]:
            self.rplaces[slot] = FASTRESUME_PARTIAL
    self.rplaces.tofile(resumefile)
|
||||
|
||||
def get_have_list(self):
    """Return the have bitfield serialized as a raw byte string."""
    return self.have.tostring()
|
||||
|
||||
def do_I_have(self, index):
    """True if piece `index` is downloaded and recorded in the bitfield."""
    return self.have[index]
|
||||
|
||||
def do_I_have_requests(self, index):
    """True if piece `index` still has chunks that can be requested
    (a truthy inactive_requests entry: a nonempty list, or the flag 1)."""
    return bool(self.inactive_requests[index])
|
||||
|
||||
def new_request(self, index):
    # returns (begin, length)
    """Hand out the lowest-offset inactive chunk of piece `index` and
    mark it active.  Sets self.endgame when the last inactive byte in
    the torrent has been handed out."""
    # inactive_requests[index] == 1 is a lazy flag meaning "all chunks
    # inactive but the list was never built"; build it now
    if self.inactive_requests[index] == 1:
        self._make_inactive(index)
    self.numactive[index] += 1
    self.stat_active[index] = 1
    if index not in self.stat_dirty:
        self.stat_new[index] = 1
    rs = self.inactive_requests[index]
    r = min(rs)
    rs.remove(r)
    self.amount_inactive -= r[1]
    if self.amount_inactive == 0:
        self.endgame = True
    return r
|
||||
|
||||
def piece_came_in(self, index, begin, piece, source = None):
    """Store an arriving chunk of piece `index` at offset `begin`.

    When the chunk completes the piece, hash-check it: on success update
    have/amount_left and credit senders; on failure recycle the piece
    and try to identify the bad sender.  Returns False only when a
    completed piece failed its hash check, True otherwise.
    """
    # Ensure the piece has a physical slot, relocating whatever occupies
    # the preferred position if necessary.
    if self.places[index] < 0:
        if self.rplaces[index] == ALLOCATED:
            self._initalloc(index, index)
        else:
            n = self._get_free_place()
            if self.places[n] >= 0:
                oldpos = self.places[n]
                self._move_piece(oldpos, n)
                n = oldpos
            if self.rplaces[index] < 0 or index == n:
                self._initalloc(n, index)
            else:
                self._move_piece(index, n)
                self._initalloc(index, index)

    # If this piece failed before, flag senders whose new data differs
    # from what is already on disk as suspects.
    if index in self.failed_pieces:
        old = self.storage.read(self.places[index] * self.piece_size +
                                begin, len(piece))
        if old != piece:
            self.failed_pieces[index][self.download_history[index][begin]]\
                = None
    self.download_history.setdefault(index, {})
    self.download_history[index][begin] = source

    self.storage.write(self.places[index] * self.piece_size + begin, piece)
    self.stat_dirty[index] = 1
    self.numactive[index] -= 1
    if self.numactive[index] == 0:
        del self.stat_active[index]
        if index in self.stat_new:
            del self.stat_new[index]
    if not self.inactive_requests[index] and not self.numactive[index]:
        # piece complete: verify against the torrent hash
        del self.stat_dirty[index]
        if sha(self.storage.read(self.piece_size * self.places[index], self._piecelen(index))).digest() == self.hashes[index]:
            self.have[index] = True
            self.storage.downloaded(index * self.piece_size,
                                    self._piecelen(index))
            self.inactive_requests[index] = None
            self.waschecked[index] = True
            self.amount_left -= self._piecelen(index)
            self.stat_numdownloaded += 1
            # credit every peer that contributed to the good piece
            for d in self.download_history[index].itervalues():
                if d is not None:
                    d.good(index)
            del self.download_history[index]
            if index in self.failed_pieces:
                # earlier suspects sent data that was later overwritten
                for d in self.failed_pieces[index]:
                    if d is not None:
                        d.bad(index)
                del self.failed_pieces[index]
            if self.amount_left == 0:
                self.finished()
        else:
            # hash check failed: put the whole piece back in play
            self.data_flunked(self._piecelen(index), index)
            self.inactive_requests[index] = 1
            self.amount_inactive += self._piecelen(index)
            self.stat_numflunked += 1

            self.failed_pieces[index] = {}
            allsenders = {}
            for d in self.download_history[index].itervalues():
                allsenders[d] = None
            if len(allsenders) == 1:
                # single sender for the whole piece: it must be at fault
                culprit = allsenders.keys()[0]
                if culprit is not None:
                    culprit.bad(index, bump = True)
                del self.failed_pieces[index] # found the culprit already
            return False
    return True
|
||||
|
||||
def request_lost(self, index, begin, length):
    """Return a pending chunk of piece `index` to the inactive pool
    after its connection dropped, clearing per-piece activity stats
    once no transfers remain."""
    self.inactive_requests[index].append((begin, length))
    self.amount_inactive += length
    self.numactive[index] -= 1
    if self.numactive[index] or index not in self.stat_active:
        return
    del self.stat_active[index]
    if index in self.stat_new:
        del self.stat_new[index]
|
||||
|
||||
def get_piece(self, index, begin, length):
    """Read `length` bytes at offset `begin` of completed piece `index`
    for upload.  Returns None if the piece is missing or the range is
    out of bounds.  Lazily hash-checks pieces claimed complete at
    start-up the first time they are served.
    """
    if not self.have[index]:
        return None
    if not self.waschecked[index]:
        if sha(self.storage.read(self.piece_size * self.places[index], self._piecelen(index))).digest() != self.hashes[index]:
            raise BTFailure, _("told file complete on start-up, but piece failed hash check")
        self.waschecked[index] = True
    if begin + length > self._piecelen(index):
        return None
    return self.storage.read(self.piece_size * self.places[index] + begin, length)
|
810
BitTorrent/TorrentQueue.py
Executable file
810
BitTorrent/TorrentQueue.py
Executable file
@ -0,0 +1,810 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Uoti Urpala
|
||||
|
||||
from __future__ import division
|
||||
|
||||
import os
|
||||
import sys
|
||||
import threading
|
||||
import traceback
|
||||
|
||||
from BitTorrent.platform import bttime
|
||||
from BitTorrent.download import Feedback, Multitorrent
|
||||
from BitTorrent.controlsocket import ControlSocket
|
||||
from BitTorrent.bencode import bdecode
|
||||
from BitTorrent.ConvertedMetainfo import ConvertedMetainfo
|
||||
from BitTorrent.prefs import Preferences
|
||||
from BitTorrent import BTFailure, BTShutdown, INFO, WARNING, ERROR, CRITICAL
|
||||
from BitTorrent import configfile
|
||||
from BitTorrent import FAQ_URL
|
||||
import BitTorrent
|
||||
|
||||
|
||||
# Torrent lifecycle states used throughout TorrentQueue:
RUNNING = 0           # actively downloading/seeding
RUN_QUEUED = 1        # marked to run, waiting for _check_queue to start it
QUEUED = 2            # waiting in the run queue
KNOWN = 3             # known to the client but inactive
ASKING_LOCATION = 4   # waiting for the user to pick a save path
|
||||
|
||||
|
||||
class TorrentInfo(object):
    """Mutable record tracking one torrent's metadata, runtime download
    object, queue state and lifetime transfer totals."""

    def __init__(self, config):
        # Identity/runtime fields; filled in later by TorrentQueue.
        self.metainfo = None
        self.dlpath = None
        self.dl = None
        self.state = None
        self.completion = None
        self.finishtime = None
        # *_old carry the totals accumulated in previous client runs.
        self.uptotal = self.uptotal_old = 0
        self.downtotal = self.downtotal_old = 0
        self.config = config
|
||||
|
||||
|
||||
def decode_position(l, pred, succ, default=None):
    """Compute an insertion index into list `l` from the UI's view of
    the ordering: `pred` should end up immediately before the slot and
    `succ` immediately after.  Either bound may be None (open-ended).
    Falls back to `default` (end of list when None) if the bounds do
    not identify a slot."""
    if default is None:
        default = len(l)
    if pred is None and succ is None:
        return default
    if pred is None:
        return 0
    if succ is None:
        return len(l)
    try:
        if pred not in l and l[0] == succ:
            return 0
        if succ not in l and l[-1] == pred:
            return len(l)
        after_pred = l.index(pred) + 1
        if l[after_pred] == succ:
            return after_pred
    except (ValueError, IndexError):
        pass
    return default
|
||||
|
||||
|
||||
class TorrentQueue(Feedback):
|
||||
|
||||
def __init__(self, config, ui_options, controlsocket):
    # Sets up empty queue bookkeeping only; torrents are loaded later
    # by run() via _restore_state().
    self.ui_options = ui_options
    self.controlsocket = controlsocket
    self.config = config
    self.config['def_running_torrents'] = 1 # !@# XXX
    self.config['max_running_torrents'] = 100 # !@# XXX
    self.doneflag = threading.Event()
    # infohash -> TorrentInfo; ordering lives in the three lists below
    self.torrents = {}
    self.starting_torrent = None
    self.running_torrents = []
    self.queue = []
    self.other_torrents = []
    self.last_save_time = 0
    self.last_version_check = 0
    # 0 = not started, 1 = ready, -1 = startup failed (set in run())
    self.initialized = 0
|
||||
|
||||
def run(self, ui, ui_wrap, startflag):
    """Main entry of the queue thread: bring up networking, restore
    saved state, announce torrents to the UI, then run the event loop
    until doneflag is set; finally persist totals and state.

    `startflag` is set once startup succeeded or failed so the GUI
    thread stops waiting; self.initialized reports which it was.
    """
    try:
        self.ui = ui
        self.run_ui_task = ui_wrap
        self.multitorrent = Multitorrent(self.config, self.doneflag,
                                         self.global_error, listen_fail_ok=True)
        self.rawserver = self.multitorrent.rawserver
        self.controlsocket.set_rawserver(self.rawserver)
        self.controlsocket.start_listening(self.external_command)
        try:
            self._restore_state()
        except BTFailure, e:
            # corrupt state file: start from scratch rather than die
            self.torrents = {}
            self.running_torrents = []
            self.queue = []
            self.other_torrents = []
            self.global_error(ERROR, _("Could not load saved state: ")+str(e))
        else:
            for infohash in self.running_torrents + self.queue + \
                    self.other_torrents:
                t = self.torrents[infohash]
                if t.dlpath is not None:
                    t.completion = self.multitorrent.get_completion(
                        self.config, t.metainfo, t.dlpath)
                state = t.state
                if state == RUN_QUEUED:
                    state = RUNNING
                self.run_ui_task(self.ui.new_displayed_torrent, infohash,
                                 t.metainfo, t.dlpath, state, t.config,
                                 t.completion, t.uptotal, t.downtotal, )
        self._check_queue()
        self.initialized = 1
        startflag.set()
    except Exception, e:
        # dump a normal exception traceback
        traceback.print_exc()
        # set the error flag
        self.initialized = -1
        # signal the gui thread to stop waiting
        startflag.set()
        return

    self._queue_loop()
    self.multitorrent.rawserver.listen_forever()
    if self.doneflag.isSet():
        self.run_ui_task(self.ui.quit)
    self.multitorrent.close_listening_socket()
    self.controlsocket.close_socket()
    # fold live transfer counters into the totals before the final save
    for infohash in list(self.running_torrents):
        t = self.torrents[infohash]
        if t.state == RUN_QUEUED:
            continue
        t.dl.shutdown()
        if t.dl is not None: # possibly set to none by failed()
            totals = t.dl.get_total_transfer()
            t.uptotal = t.uptotal_old + totals[0]
            t.downtotal = t.downtotal_old + totals[1]
    self._dump_state()
|
||||
|
||||
def _check_version(self):
    """Ask the UI to run its version check, at most once per day."""
    now = bttime()
    one_day = 24*60*60
    if self.last_version_check > 0 and now - self.last_version_check < one_day:
        return
    self.last_version_check = now
    self.run_ui_task(self.ui.check_version)
|
||||
|
||||
def _dump_config(self):
    """Persist the global UI config plus each torrent's per-torrent
    config overrides to disk."""
    configfile.save_ui_config(self.config, 'bittorrent',
                              self.ui_options, self.global_error)
    for infohash,t in self.torrents.items():
        # error callback routes save failures to this torrent's UI entry
        ec = lambda level, message: self.error(t.metainfo, level, message)
        config = t.config.getDict()
        if config:
            configfile.save_torrent_config(self.config['data_dir'],
                                           infohash, config, ec)
|
||||
|
||||
def _dump_state(self):
    """Rewrite the ui_state file (version 3 format) listing running,
    queued and known torrents with totals and save paths.  Written to
    a .new file first, then renamed for crash safety."""
    self.last_save_time = bttime()
    r = []
    def write_entry(infohash, t):
        if t.dlpath is None:
            # bare hash line: only legal while asking for a location
            assert t.state == ASKING_LOCATION
            r.append(infohash.encode('hex') + '\n')
        else:
            r.append(infohash.encode('hex') + ' ' + str(t.uptotal) + ' ' +
                     str(t.downtotal)+' '+t.dlpath.encode('string_escape')+'\n')
    r.append('BitTorrent UI state file, version 3\n')
    r.append('Running torrents\n')
    for infohash in self.running_torrents:
        write_entry(infohash, self.torrents[infohash])
    r.append('Queued torrents\n')
    for infohash in self.queue:
        write_entry(infohash, self.torrents[infohash])
    r.append('Known torrents\n')
    for infohash in self.other_torrents:
        write_entry(infohash, self.torrents[infohash])
    r.append('End\n')
    f = None
    try:
        filename = os.path.join(self.config['data_dir'], 'ui_state')
        f = file(filename + '.new', 'wb')
        f.write(''.join(r))
        f.close()
        if os.access(filename, os.F_OK):
            os.remove(filename) # no atomic rename on win32
        os.rename(filename + '.new', filename)
    except Exception, e:
        self.global_error(ERROR, _("Could not save UI state: ") + str(e))
        if f is not None:
            f.close()
|
||||
|
||||
def _restore_state(self):
    """Reload the queue from the on-disk ui_state file, rebuilding a
    TorrentInfo per entry from the cached metainfo files.  Raises
    BTFailure for structurally invalid state; per-torrent problems are
    reported and the entry skipped."""
    def decode_line(line):
        # One entry line = 40 hex chars of infohash, optionally followed
        # by " uptotal downtotal dlpath" (format version >= 2).
        hashtext = line[:40]
        try:
            infohash = hashtext.decode('hex')
        except:
            raise BTFailure(_("Invalid state file contents"))
        if len(infohash) != 20:
            raise BTFailure(_("Invalid state file contents"))
        try:
            path = os.path.join(self.config['data_dir'], 'metainfo',
                                hashtext)
            f = file(path, 'rb')
            data = f.read()
            f.close()
        except Exception, e:
            try:
                f.close()
            except:
                pass
            self.global_error(ERROR,
                              _("Error reading file ") + path +
                              " (" + str(e)+ "), " +
                              _("cannot restore state completely"))
            return None
        if infohash in self.torrents:
            raise BTFailure(_("Invalid state file (duplicate entry)"))
        t = TorrentInfo(Preferences(self.config))
        self.torrents[infohash] = t
        try:
            t.metainfo = ConvertedMetainfo(bdecode(data))
        except Exception, e:
            self.global_error(ERROR, _("Corrupt data in ")+path+
                              _(" , cannot restore torrent (")+str(e)+")")
            return None
        t.metainfo.reported_errors = True # suppress redisplay on restart
        if infohash != t.metainfo.infohash:
            self.global_error(ERROR, _("Corrupt data in ")+path+
                              _(" , cannot restore torrent (")+'infohash mismatch'+")")
            # BUG cannot localize due to string freeze
            return None
        if len(line) == 41:
            # bare hash + newline: no download location chosen yet
            t.dlpath = None
            return infohash, t
        config = configfile.read_torrent_config(self.config,
                                                self.config['data_dir'],
                                                infohash, self.global_error)
        if config:
            t.config.update(config)
        try:
            # `version` comes from the enclosing scope, parsed below
            if version < 2:
                t.dlpath = line[41:-1].decode('string_escape')
            else:
                up, down, dlpath = line[41:-1].split(' ', 2)
                t.uptotal = t.uptotal_old = int(up)
                t.downtotal = t.downtotal_old = int(down)
                t.dlpath = dlpath.decode('string_escape')
        except ValueError: # unpack, int(), decode()
            raise BTFailure(_("Invalid state file (bad entry)"))
        return infohash, t
    filename = os.path.join(self.config['data_dir'], 'ui_state')
    if not os.path.exists(filename):
        return
    f = None
    try:
        f = file(filename, 'rb')
        lines = f.readlines()
        f.close()
    except Exception, e:
        if f is not None:
            f.close()
        raise BTFailure(str(e))
    i = iter(lines)
    try:
        txt = 'BitTorrent UI state file, version '
        version = i.next()
        if not version.startswith(txt):
            raise BTFailure(_("Bad UI state file"))
        try:
            version = int(version[len(txt):-1])
        except:
            raise BTFailure(_("Bad UI state file version"))
        if version > 3:
            raise BTFailure(_("Unsupported UI state file version (from "
                              "newer client version?)"))
        if version < 3:
            # old format: one combined running/queued section
            if i.next() != 'Running/queued torrents\n':
                raise BTFailure(_("Invalid state file contents"))
        else:
            if i.next() != 'Running torrents\n':
                raise BTFailure(_("Invalid state file contents"))
            while True:
                line = i.next()
                if line == 'Queued torrents\n':
                    break
                t = decode_line(line)
                if t is None:
                    continue
                infohash, t = t
                if t.dlpath is None:
                    raise BTFailure(_("Invalid state file contents"))
                t.state = RUN_QUEUED
                self.running_torrents.append(infohash)
        while True:
            line = i.next()
            if line == 'Known torrents\n':
                break
            t = decode_line(line)
            if t is None:
                continue
            infohash, t = t
            if t.dlpath is None:
                raise BTFailure(_("Invalid state file contents"))
            t.state = QUEUED
            self.queue.append(infohash)
        while True:
            line = i.next()
            if line == 'End\n':
                break
            t = decode_line(line)
            if t is None:
                continue
            infohash, t = t
            if t.dlpath is None:
                t.state = ASKING_LOCATION
            else:
                t.state = KNOWN
            self.other_torrents.append(infohash)
    except StopIteration:
        raise BTFailure(_("Invalid state file contents"))
|
||||
|
||||
def _queue_loop(self):
    """Scheduler tick (rescheduled every 20 s): stop the first seeding
    torrent that has met its seed-time/ratio settings so the next
    queued one can run, and autosave state every 5 minutes."""
    if self.doneflag.isSet():
        return
    self.rawserver.add_task(self._queue_loop, 20)
    now = bttime()
    self._check_version()
    if self.queue and self.starting_torrent is None:
        # others are waiting: use the "next torrent" thresholds
        mintime = now - self.config['next_torrent_time'] * 60
        minratio = self.config['next_torrent_ratio'] / 100
        if self.config['seed_forever']:
            minratio = 1e99
    else:
        # nothing waiting: use the "last torrent" thresholds
        mintime = 0
        minratio = self.config['last_torrent_ratio'] / 100
        if self.config['seed_last_forever']:
            minratio = 1e99
    if minratio >= 1e99:
        return
    for infohash in self.running_torrents:
        t = self.torrents[infohash]
        myminratio = minratio
        if t.dl:
            # per-torrent "seed forever" overrides beat the global ratio
            if self.queue and t.dl.config['seed_last_forever']:
                myminratio = 1e99
            elif t.dl.config['seed_forever']:
                myminratio = 1e99
        if t.state == RUN_QUEUED:
            continue
        totals = t.dl.get_total_transfer()
        # not updated for remaining torrents if one is stopped, who cares
        t.uptotal = t.uptotal_old + totals[0]
        t.downtotal = t.downtotal_old + totals[1]
        if t.finishtime is None or t.finishtime > now - 120:
            continue
        if t.finishtime > mintime:
            if t.uptotal < t.metainfo.total_bytes * myminratio:
                continue
        self.change_torrent_state(infohash, RUNNING, KNOWN)
        break
    if self.running_torrents and self.last_save_time < now - 300:
        self._dump_state()
|
||||
|
||||
def _check_queue(self):
    """Start the next torrent when possible: RUN_QUEUED torrents take
    priority, then the head of the queue if a run slot is free.  No-op
    while paused or while another torrent is still starting."""
    if self.starting_torrent is not None or self.config['pause']:
        return
    for infohash in self.running_torrents:
        if self.torrents[infohash].state == RUN_QUEUED:
            self.starting_torrent = infohash
            t = self.torrents[infohash]
            t.state = RUNNING
            t.finishtime = None
            t.dl = self.multitorrent.start_torrent(t.metainfo, t.config,
                                                   self, t.dlpath)
            return
    if not self.queue or len(self.running_torrents) >= \
           self.config['def_running_torrents']:
        return
    infohash = self.queue.pop(0)
    self.starting_torrent = infohash
    t = self.torrents[infohash]
    assert t.state == QUEUED
    t.state = RUNNING
    t.finishtime = None
    self.running_torrents.append(infohash)
    t.dl = self.multitorrent.start_torrent(t.metainfo, t.config, self,
                                           t.dlpath)
    self._send_state(infohash)
|
||||
|
||||
def _send_state(self, infohash):
    """Push a torrent's state, completion, totals and queue position to
    the UI; RUN_QUEUED is presented as RUNNING."""
    t = self.torrents[infohash]
    state = t.state
    if state == RUN_QUEUED:
        state = RUNNING
    pos = None
    if state in (KNOWN, RUNNING, QUEUED):
        ordering = self._get_list(state)
        # only send an explicit position when not at the end of its list
        if ordering[-1] != infohash:
            pos = ordering.index(infohash)
    self.run_ui_task(self.ui.torrent_state_changed, infohash, t.dlpath,
                     state, t.completion, t.uptotal_old, t.downtotal_old,
                     pos)
|
||||
|
||||
def _stop_running(self, infohash):
    """Stop a RUNNING or RUN_QUEUED torrent, folding its transfer
    counters into the *_old totals.  Returns True on a clean stop;
    False when the torrent had already failed externally (caller must
    re-home the infohash, it is left in no list)."""
    t = self.torrents[infohash]
    if t.state == RUN_QUEUED:
        # never actually started: just dequeue it
        self.running_torrents.remove(infohash)
        t.state = KNOWN
        return True
    assert t.state == RUNNING
    t.dl.shutdown()
    if infohash == self.starting_torrent:
        self.starting_torrent = None
    try:
        self.running_torrents.remove(infohash)
    except ValueError:
        # failed() already moved it to other_torrents
        self.other_torrents.remove(infohash)
        return False
    else:
        t.state = KNOWN
        totals = t.dl.get_total_transfer()
        t.uptotal_old += totals[0]
        t.uptotal = t.uptotal_old
        t.downtotal_old += totals[1]
        t.downtotal = t.downtotal_old
        t.dl = None
        t.completion = self.multitorrent.get_completion(self.config,
                                                        t.metainfo, t.dlpath)
    return True
|
||||
|
||||
def external_command(self, action, *datas):
    """Dispatch a command arriving over the control socket from another
    client instance ('start_torrent', 'show_error' or 'no-op')."""
    if action == 'no-op':
        return
    if action == 'start_torrent':
        assert len(datas) == 2
        self.start_new_torrent(datas[0], save_as=datas[1])
    elif action == 'show_error':
        assert len(datas) == 1
        self.global_error(ERROR, datas[0])
|
||||
|
||||
def remove_torrent(self, infohash):
    """Forget a torrent completely: stop it if running, drop it from
    all lists and the UI, and delete its cached metainfo, resume data
    and per-torrent config."""
    if infohash not in self.torrents:
        return
    state = self.torrents[infohash].state
    if state == QUEUED:
        self.queue.remove(infohash)
    elif state in (RUNNING, RUN_QUEUED):
        self._stop_running(infohash)
        self._check_queue()
    else:
        self.other_torrents.remove(infohash)
    self.run_ui_task(self.ui.removed_torrent, infohash)
    del self.torrents[infohash]

    # best-effort cleanup of the cached files; failure is only a warning
    for d in ['metainfo', 'resume']:
        filename = os.path.join(self.config['data_dir'], d,
                                infohash.encode('hex'))
        try:
            os.remove(filename)
        except Exception, e:
            self.global_error(WARNING,
                              (_("Could not delete cached %s file:")%d) +
                              str(e))
    ec = lambda level, message: self.global_error(level, message)
    configfile.remove_torrent_config(self.config['data_dir'],
                                     infohash, ec)
    self._dump_state()
|
||||
|
||||
def set_save_location(self, infohash, dlpath):
    """Set the download path of a non-running torrent and recompute its
    completion.  A torrent that was waiting for a location is promoted
    straight into the queue."""
    torrent = self.torrents.get(infohash)
    if torrent is None or torrent.state == RUNNING:
        return
    torrent.dlpath = dlpath
    torrent.completion = self.multitorrent.get_completion(self.config,
                                                          torrent.metainfo, dlpath)
    if torrent.state == ASKING_LOCATION:
        torrent.state = KNOWN
        # change_torrent_state sends state and dumps state itself
        self.change_torrent_state(infohash, KNOWN, QUEUED)
    else:
        self._send_state(infohash)
        self._dump_state()
|
||||
|
||||
def start_new_torrent(self, data, save_as=None):
    """Add a torrent from raw .torrent bytes `data`: validate it,
    handle duplicates, cache the metainfo on disk, and hand it to the
    UI in ASKING_LOCATION state so the user can pick a save path."""
    t = TorrentInfo(Preferences(self.config))
    try:
        t.metainfo = ConvertedMetainfo(bdecode(data))
    except Exception, e:
        self.global_error(ERROR, _("This is not a valid torrent file. (%s)")
                          % str(e))
        return
    infohash = t.metainfo.infohash
    if infohash in self.torrents:
        # duplicate: report, or re-queue if it was merely KNOWN
        real_state = self.torrents[infohash].state
        if real_state in (RUNNING, RUN_QUEUED):
            self.error(t.metainfo, ERROR,
                       _("This torrent (or one with the same contents) is "
                         "already running."))
        elif real_state == QUEUED:
            self.error(t.metainfo, ERROR,
                       _("This torrent (or one with the same contents) is "
                         "already waiting to run."))
        elif real_state == ASKING_LOCATION:
            pass
        elif real_state == KNOWN:
            self.change_torrent_state(infohash, KNOWN, newstate=QUEUED)
        else:
            raise BTFailure(_("Torrent in unknown state %d") % real_state)
        return

    # cache the raw metainfo for restarts (write .new then rename)
    path = os.path.join(self.config['data_dir'], 'metainfo',
                        infohash.encode('hex'))
    try:
        f = file(path+'.new', 'wb')
        f.write(data)
        f.close()
        if os.access(path, os.F_OK):
            os.remove(path) # no atomic rename on win32
        os.rename(path+'.new', path)
    except Exception, e:
        try:
            f.close()
        except:
            pass
        self.global_error(ERROR, _("Could not write file ") + path +
                          ' (' + str(e) + '), ' +
                          _("torrent will not be restarted "
                            "correctly on client restart"))

    config = configfile.read_torrent_config(self.config,
                                            self.config['data_dir'],
                                            infohash, self.global_error)
    if config:
        t.config.update(config)
    if save_as:
        self.run_ui_task(self.ui.set_config, 'save_as', save_as)
    else:
        save_as = None

    self.torrents[infohash] = t
    t.state = ASKING_LOCATION
    self.other_torrents.append(infohash)
    self._dump_state()
    self.run_ui_task(self.ui.new_displayed_torrent, infohash,
                     t.metainfo, save_as, t.state, t.config)

    def show_error(level, text):
        self.run_ui_task(self.ui.error, infohash, level, text)
    t.metainfo.show_encoding_errors(show_error)
|
||||
|
||||
def set_config(self, option, value, ihash=None):
    """Change a global (ihash None/falsy) or per-torrent config option,
    propagate it to the live download objects, and persist the config."""
    if not ihash:
        oldvalue = self.config[option]
        self.config[option] = value
        self.multitorrent.set_option(option, value)
        if option == 'pause':
            if value:# and not oldvalue:
                self.set_zero_running_torrents()
            elif not value:# and oldvalue:
                self._check_queue()
    else:
        torrent = self.torrents[ihash]
        if torrent.state == RUNNING:
            torrent.dl.set_option(option, value)
            # port options need the listening socket re-bound
            if option in ('forwarded_port', 'maxport'):
                torrent.dl.change_port()
        torrent.config[option] = value
    self._dump_config()
|
||||
|
||||
def request_status(self, infohash, want_spew, want_fileinfo):
    """Fetch live status from a running torrent, add a seeding
    time-remaining estimate ('timeEst') once it has finished, and
    forward everything to the UI."""
    torrent = self.torrents.get(infohash)
    if torrent is None or torrent.state != RUNNING:
        return
    status = torrent.dl.get_status(want_spew, want_fileinfo)
    if torrent.finishtime is not None:
        # seeding: estimate time until the applicable stop-ratio is met
        now = bttime()
        uptotal = status['upTotal'] + torrent.uptotal_old
        downtotal = status['downTotal'] + torrent.downtotal_old
        ulspeed = status['upRate2']
        if self.queue:
            ratio = torrent.dl.config['next_torrent_ratio'] / 100
            if torrent.dl.config['seed_forever']:
                ratio = 1e99
        else:
            ratio = torrent.dl.config['last_torrent_ratio'] / 100
            if torrent.dl.config['seed_last_forever']:
                ratio = 1e99
        if ulspeed <= 0 or ratio >= 1e99:
            rem = 1e99
        elif downtotal == 0:
            rem = (torrent.metainfo.total_bytes * ratio - uptotal) / ulspeed
        else:
            rem = (downtotal * ratio - uptotal) / ulspeed
        if self.queue and not torrent.dl.config['seed_forever']:
            # the seed-time limit may kick in before the ratio does
            rem = min(rem, torrent.finishtime +
                      torrent.dl.config['next_torrent_time'] * 60 - now)
        # _queue_loop never stops a torrent in its first 120 s of seeding
        rem = max(rem, torrent.finishtime + 120 - now)
        if rem <= 0:
            rem = 1
        if rem >= 1e99:
            rem = None
        status['timeEst'] = rem
    self.run_ui_task(self.ui.update_status, infohash, status)
|
||||
|
||||
def _get_list(self, state):
    """Return the ordering list corresponding to a torrent state."""
    if state in (RUNNING, RUN_QUEUED):
        return self.running_torrents
    if state == QUEUED:
        return self.queue
    if state == KNOWN:
        return self.other_torrents
    assert False
|
||||
|
||||
def change_torrent_state(self, infohash, oldstate, newstate=None,
                         pred=None, succ=None, replaced=None,
                         force_running=False):
    """Move a torrent between the KNOWN/QUEUED/RUNNING lists or reorder
    it within one, at the position described by its UI neighbours
    `pred`/`succ`.  `replaced` names a running torrent to swap out;
    `force_running` allows exceeding def_running_torrents up to
    max_running_torrents.

    Fix: the no-op reorder check used to read `l == origpos`, comparing
    the list itself to an int (always False), so the early return for
    "position unchanged" never fired and every reorder caused a
    redundant state dump and UI reorder event.  It now compares the
    computed position, as the decode_position(..., -1) default and the
    restore-insert clearly intend.
    """
    t = self.torrents.get(infohash)
    # ignore stale requests (torrent gone or state changed meanwhile);
    # RUN_QUEUED counts as RUNNING for the caller's purposes
    if t is None or (t.state != oldstate and not (t.state == RUN_QUEUED and
                                                  oldstate == RUNNING)):
        return
    if newstate is None:
        newstate = oldstate
    assert oldstate in (KNOWN, QUEUED, RUNNING)
    assert newstate in (KNOWN, QUEUED, RUNNING)
    pos = None
    if oldstate != RUNNING and newstate == RUNNING and replaced is None:
        # no free run slot: demote to the head of the queue instead
        if len(self.running_torrents) >= (force_running and self.config[
                'max_running_torrents'] or self.config['def_running_torrents']):
            if force_running:
                self.global_error(ERROR,
                                  _("Can't run more than %d torrents "
                                    "simultaneously. For more info see the"
                                    " FAQ at %s.")%
                                  (self.config['max_running_torrents'],
                                   FAQ_URL))
            newstate = QUEUED
            pos = 0
    l = self._get_list(newstate)
    if newstate == oldstate:
        # pure reorder within one list
        origpos = l.index(infohash)
        del l[origpos]
        if pos is None:
            pos = decode_position(l, pred, succ, -1)
        if pos == -1 or pos == origpos:
            # position unknown or unchanged: restore and do nothing
            l.insert(origpos, infohash)
            return
        l.insert(pos, infohash)
        self._dump_state()
        self.run_ui_task(self.ui.reorder_torrent, infohash, pos)
        return
    if pos is None:
        pos = decode_position(l, pred, succ)
    if newstate == RUNNING:
        newstate = RUN_QUEUED
        if replaced and len(self.running_torrents) >= \
               self.config['def_running_torrents']:
            # swap out the named running torrent to make room
            t2 = self.torrents.get(replaced)
            if t2 is None or t2.state not in (RUNNING, RUN_QUEUED):
                return
            if self.running_torrents.index(replaced) < pos:
                pos -= 1
            if self._stop_running(replaced):
                t2.state = QUEUED
                self.queue.insert(0, replaced)
                self._send_state(replaced)
            else:
                self.other_torrents.append(replaced)
    if oldstate == RUNNING:
        if newstate == QUEUED and len(self.running_torrents) <= \
               self.config['def_running_torrents'] and pos == 0:
            # it would immediately be restarted by _check_queue: skip
            return
        if not self._stop_running(infohash):
            # externally failed meanwhile: it belongs in other_torrents
            if newstate == KNOWN:
                self.other_torrents.insert(pos, infohash)
                self.run_ui_task(self.ui.reorder_torrent, infohash, pos)
            else:
                self.other_torrents.append(infohash)
            return
    else:
        self._get_list(oldstate).remove(infohash)
    t.state = newstate
    l.insert(pos, infohash)
    self._check_queue() # sends state if it starts the torrent from queue
    if t.state != RUNNING or newstate == RUN_QUEUED:
        self._send_state(infohash)
    self._dump_state()
|
||||
|
||||
def set_zero_running_torrents(self):
    """Stop every running torrent (pause), demoting cleanly stopped
    ones to RUN_QUEUED so they resume automatically on unpause."""
    still_queued = []
    for infohash in list(self.running_torrents):
        t = self.torrents[infohash]
        if self._stop_running(infohash):
            still_queued.append(infohash)
            t.state = RUN_QUEUED
        else:
            self.other_torrents.append(infohash)
    self.running_torrents = still_queued
|
||||
|
||||
def check_completion(self, infohash, filelist=False):
    """Recompute a torrent's completion (optionally per-file) and push
    the result to the UI; unknown infohashes are ignored."""
    t = self.torrents.get(infohash)
    if t is None:
        return
    r = self.multitorrent.get_completion(self.config, t.metainfo,
                                         t.dlpath, filelist)
    if filelist and r is not None:
        # per-file mode returns a tuple to splat into the UI call
        self.run_ui_task(self.ui.update_completion, infohash, *r)
    else:
        self.run_ui_task(self.ui.update_completion, infohash, r)
|
||||
|
||||
def global_error(self, level, text):
    """Route a non-torrent-specific error message to the UI thread."""
    self.run_ui_task(self.ui.global_error, level, text)
|
||||
|
||||
# callbacks from torrent instances
|
||||
|
||||
def failed(self, torrent, is_external):
    """Feedback callback: a running torrent died.  Move it to KNOWN,
    fold its transfer counters into the *_old totals, notify the UI
    and start the next queued torrent.  `is_external` means the
    failure came from outside the download itself, in which case the
    completion figure is still meaningful."""
    infohash = torrent.infohash
    if infohash == self.starting_torrent:
        self.starting_torrent = None
    self.running_torrents.remove(infohash)
    t = self.torrents[infohash]
    t.state = KNOWN
    if is_external:
        t.completion = self.multitorrent.get_completion(
            self.config, t.metainfo, t.dlpath)
    else:
        t.completion = None
    totals = t.dl.get_total_transfer()
    t.uptotal_old += totals[0]
    t.uptotal = t.uptotal_old
    t.downtotal_old += totals[1]
    t.downtotal = t.downtotal_old
    t.dl = None
    self.other_torrents.append(infohash)
    self._send_state(infohash)
    if not self.doneflag.isSet():
        self._check_queue()
        self._dump_state()
|
||||
|
||||
def finished(self, torrent):
    """Handle a torrent reaching 100% completion.

    If this client never downloaded anything it must be the original
    seeder, so seeding is pinned on forever.  If the torrent was the
    one currently starting and it already satisfies the stop-seeding
    ratio, startup is aborted via BTShutdown.  Records the finish time.
    """
    infohash = torrent.infohash
    t = self.torrents[infohash]
    totals = t.dl.get_total_transfer()
    if t.downtotal == 0 and t.downtotal_old == 0 and totals[1] == 0:
        # no bytes ever downloaded -> original seeder: never stop seeding
        self.set_config('seed_forever', True, infohash)

    if infohash == self.starting_torrent:
        t = self.torrents[infohash]
        if self.queue:
            # NOTE(review): under Python 2 this is floor division if the
            # config value is an int -- confirm these ratios are floats
            ratio = t.config['next_torrent_ratio'] / 100
            if t.config['seed_forever']:
                ratio = 1e99
            msg = _("Not starting torrent as there are other torrents "
                    "waiting to run, and this one already meets the "
                    "settings for when to stop seeding.")
        else:
            ratio = t.config['last_torrent_ratio'] / 100
            if t.config['seed_last_forever']:
                ratio = 1e99
            msg = _("Not starting torrent as it already meets the "
                    "settings for when to stop seeding the last "
                    "completed torrent.")
        if ratio < 1e99 and t.uptotal >= t.metainfo.total_bytes * ratio:
            raise BTShutdown(msg)
    self.torrents[torrent.infohash].finishtime = bttime()
|
||||
|
||||
def started(self, torrent):
    """Callback: *torrent* has finished starting up.

    Clears the starting-torrent marker and lets the queue try to
    launch the next waiting torrent.
    """
    assert torrent.infohash == self.starting_torrent
    self.starting_torrent = None
    self._check_queue()
|
||||
|
||||
def error(self, torrent, level, text):
    """Forward a per-torrent error message to the UI thread."""
    self.run_ui_task(self.ui.error, torrent.infohash, level, text)
|
||||
|
||||
|
||||
class ThreadWrappedQueue(object):
    """Thread-safe facade around a queue object.

    Each proxied method (installed below) schedules the real call on
    the wrapped object's rawserver thread instead of running it on the
    caller's thread.
    """

    def __init__(self, wrapped):
        self.wrapped = wrapped

    def set_done(self):
        self.wrapped.doneflag.set()
        # schedule a no-op so the event loop wakes up and sees the flag
        def _noop():
            pass
        self.wrapped.rawserver.external_add_task(_noop, 0)


def _makemethod(methodname):
    # build a proxy that defers the named call to the rawserver thread
    def wrapper(self, *args, **kws):
        def call():
            getattr(self.wrapped, methodname)(*args, **kws)
        self.wrapped.rawserver.external_add_task(call, 0)
    return wrapper

for methodname in ("request_status", "set_config", "start_new_torrent",
                   "remove_torrent", "set_save_location",
                   "change_torrent_state", "check_completion"):
    setattr(ThreadWrappedQueue, methodname, _makemethod(methodname))
del _makemethod, methodname
|
97
BitTorrent/Uploader.py
Executable file
97
BitTorrent/Uploader.py
Executable file
@ -0,0 +1,97 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen
|
||||
|
||||
from BitTorrent.CurrentRateMeasure import Measure
|
||||
|
||||
|
||||
class Upload(object):
    """Server side of one peer connection.

    Buffers incoming piece requests, answers them when unchoked, and
    feeds bytes-sent figures into the per-connection and global rate
    meters.
    """

    def __init__(self, connection, ratelimiter, totalup, totalup2, choker,
                 storage, max_slice_length, max_rate_period):
        self.connection = connection
        self.ratelimiter = ratelimiter
        self.choker = choker
        self.storage = storage
        self.totalup = totalup
        self.totalup2 = totalup2
        self.max_slice_length = max_slice_length
        self.max_rate_period = max_rate_period
        # a new peer starts out choked and not interested
        self.choked = True
        self.unchoke_time = None
        self.interested = False
        self.buffer = []
        self.measure = Measure(max_rate_period)
        if storage.do_I_have_anything():
            connection.send_bitfield(storage.get_have_list())

    def got_not_interested(self):
        if not self.interested:
            return
        self.interested = False
        # pending requests are moot once the peer stops caring
        del self.buffer[:]
        self.choker.not_interested(self.connection)

    def got_interested(self):
        if self.interested:
            return
        self.interested = True
        self.choker.interested(self.connection)

    def get_upload_chunk(self):
        """Pop the oldest queued request and return (index, begin, data),
        or None when nothing is queued or the piece read fails."""
        if not self.buffer:
            return None
        request = self.buffer.pop(0)
        data = self.storage.get_piece(*request)
        if data is None:
            # storage could not produce the piece; drop the peer
            self.connection.close()
            return None
        index, begin, _length = request
        return (index, begin, data)

    def update_rate(self, bytes):
        # account the sent bytes on all three rate meters
        for meter in (self.measure, self.totalup, self.totalup2):
            meter.update_rate(bytes)

    def got_request(self, index, begin, length):
        # oversized or unsolicited requests are protocol violations
        if length > self.max_slice_length or not self.interested:
            self.connection.close()
            return
        if self.connection.choke_sent:
            return
        self.buffer.append((index, begin, length))
        if (self.connection.next_upload is None
                and self.connection.connection.is_flushed()):
            self.ratelimiter.queue(self.connection,
                                   self.connection.encoder.context.rlgroup)

    def got_cancel(self, index, begin, length):
        request = (index, begin, length)
        if request in self.buffer:
            self.buffer.remove(request)

    def choke(self):
        if self.choked:
            return
        self.choked = True
        self.connection.send_choke()

    def sent_choke(self):
        # the choke message is on the wire; discard pending requests
        assert self.choked
        del self.buffer[:]

    def unchoke(self, time):
        if not self.choked:
            return
        self.choked = False
        self.unchoke_time = time
        self.connection.send_unchoke()

    def has_queries(self):
        return bool(self.buffer)

    def get_rate(self):
        return self.measure.get_rate()
|
123
BitTorrent/__init__.py
Executable file
123
BitTorrent/__init__.py
Executable file
@ -0,0 +1,123 @@
|
||||
# -*- coding: UTF-8 -*-
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
app_name = 'BitTorrent'
|
||||
version = '4.2.1'
|
||||
|
||||
URL = 'http://www.bittorrent.com/'
|
||||
DONATE_URL = URL + 'donate.html'
|
||||
FAQ_URL = URL + 'FAQ.html'
|
||||
HELP_URL = URL + 'documentation.html'
|
||||
SEARCH_URL = 'http://search.bittorrent.com/search.jsp?client=%(client)s&query=%(query)s'
|
||||
|
||||
import sys
|
||||
assert sys.version_info >= (2, 2, 1), _("Python 2.2.1 or newer required")
|
||||
import os
|
||||
import time
|
||||
|
||||
branch = None
|
||||
if os.access('.cdv', os.F_OK):
|
||||
branch = os.path.split(os.path.realpath(os.path.split(sys.argv[0])[0]))[1]
|
||||
|
||||
from BitTorrent.platform import get_home_dir, is_frozen_exe
|
||||
|
||||
# http://people.w3.org/rishida/names/languages.html
|
||||
language_names = {
|
||||
'af' :u'Afrikaans' , 'bg' :u'Български' ,
|
||||
'da' :u'Dansk' , 'ca' :u'Català' ,
|
||||
'cs' :u'Čeština' , 'de' :u'Deutsch' ,
|
||||
'en' :u'English' , 'es' :u'Español' ,
|
||||
'es_MX':u'Español de Mexico ' , 'fr' :u'Français' ,
|
||||
'gr' :u'Ελληνικά' , 'hu' :u'Magyar' ,
|
||||
'it' :u'Italiano' , 'ja' :u'日本語' ,
|
||||
'ko' :u'한국어' ,'nl' :u'Nederlands' ,
|
||||
'nb_NO':u'Norsk bokmål' , 'pl' :u'Polski' ,
|
||||
'pt' :u'Português' , 'pt_BR':u'Português do Brasil' ,
|
||||
'ro' :u'Română' , 'ru' :u'Русский' ,
|
||||
'sk' :u'Slovenský' , 'sl' :u'Slovensko' ,
|
||||
'sv' :u'Svenska' , 'tr' :u'Türkçe' ,
|
||||
'vi' :u'Tiếng Việt' ,
|
||||
'zh_CN':u'简体中文' , # Simplified
|
||||
'zh_TW':u'繁體中文' , # Traditional
|
||||
}
|
||||
|
||||
unfinished_language_names = {
|
||||
'ar' :u'العربية' , 'bs' :u'Bosanski' ,
|
||||
'eo' :u'Esperanto' , 'eu' :u'Euskara' ,
|
||||
'et' :u'Eesti' , 'fi' :u'Suomi' ,
|
||||
'ga' :u'Gaeilge' , 'gl' :u'Galego' ,
|
||||
'he_IL':u'עברית' , 'hr' :u'Hrvatski' ,
|
||||
'hy' :u'Հայերեն' , 'in' :u'Bahasa indonesia' ,
|
||||
'ka' :u'ქართული ენა', 'lt' :u'Lietuvių' ,
|
||||
'ms' :u'Bahasa melayu' , 'ml' :u'Malayalam' ,
|
||||
'sq' :u'Shqipe' , 'th' :u'ภาษาไทย' ,
|
||||
'tlh' :u'tlhIngan-Hol' , 'uk' :u'Українська' ,
|
||||
'hi' :u'हिन्दी' , 'cy' :u'Cymraeg' ,
|
||||
'is' :u'Íslenska' , 'nn_NO':u'Norsk Nynorsk' ,
|
||||
'te' :u'తెలుగు' ,
|
||||
}
|
||||
|
||||
#language_names.update(unfinished_language_names)
|
||||
|
||||
languages = language_names.keys()
|
||||
languages.sort()
|
||||
|
||||
if os.name == 'posix':
|
||||
if os.uname()[0] == "Darwin":
|
||||
from BitTorrent.platform import install_translation
|
||||
install_translation()
|
||||
|
||||
# hackery to get around bug in py2exe that tries to write log files to
|
||||
# application directories, which may not be writable by non-admin users
|
||||
if is_frozen_exe:
    # py2exe's default Stderr writes its log next to the executable,
    # which non-admin users may not be able to write; redirect logging
    # to the home directory (or the executable's drive root) instead.
    baseclass = sys.stderr.__class__
    class Stderr(baseclass):
        logroot = get_home_dir()

        if logroot is None:
            logroot = os.path.splitdrive(sys.executable)[0]
            if logroot[-1] != os.sep:
                logroot += os.sep
        logname = os.path.splitext(os.path.split(sys.executable)[1])[0] + '_errors.log'
        logpath = os.path.join(logroot, logname)

        def __init__(self):
            # True when the next write starts a fresh log line
            self.just_wrote_newline = True

        def write(self, text, alert=None, fname=logpath):
            output = text

            # prefix each new log line with the app version and a timestamp
            if self.just_wrote_newline and not text.startswith('[%s ' % version):
                output = '[%s %s] %s' % (version, time.strftime('%Y-%m-%d %H:%M:%S'), text)

            # GTK warnings are noise; drop them from the log
            if 'GtkWarning' not in text:
                baseclass.write(self, output, fname=fname)

            if output[-1] == '\n':
                self.just_wrote_newline = True
            else:
                self.just_wrote_newline = False

    sys.stderr = Stderr()
|
||||
|
||||
del sys, get_home_dir, is_frozen_exe
|
||||
|
||||
# message severity levels passed to error callbacks and the UI
INFO = 0
WARNING = 1
ERROR = 2
CRITICAL = 3

class BTFailure(Exception):
    """Base class for all errors raised by BitTorrent code."""
    pass

class BTShutdown(BTFailure):
    """Raised to abort an operation because the client is shutting down."""
    pass
|
||||
|
130
BitTorrent/bencode.py
Executable file
130
BitTorrent/bencode.py
Executable file
@ -0,0 +1,130 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Petru Paler
|
||||
|
||||
from BitTorrent.obsoletepythonsupport import *
|
||||
|
||||
from BitTorrent import BTFailure
|
||||
|
||||
def decode_int(x, f):
    """Decode a bencoded integer ("i<digits>e") with its 'i' at offset f.

    Returns (value, offset just past the trailing 'e').  Raises
    ValueError for leading zeros and for negative zero, as required by
    the bencoding spec.
    """
    f += 1
    end = x.index('e', f)
    n = int(x[f:end])
    first = x[f]
    if first == '-':
        if x[f + 1] == '0':
            raise ValueError
    elif first == '0' and end != f + 1:
        raise ValueError
    return (n, end + 1)
|
||||
|
||||
def decode_string(x, f):
    """Decode a bencoded string ("<len>:<bytes>") starting at offset f.

    Returns (string, offset just past the string).  A multi-digit
    length may not start with '0'.
    """
    sep = x.index(':', f)
    n = int(x[f:sep])
    if x[f] == '0' and sep != f + 1:
        raise ValueError
    start = sep + 1
    return (x[start:start + n], start + n)
|
||||
|
||||
def decode_list(x, f):
    """Decode a bencoded list ("l...e") with its 'l' at offset f.

    Returns (list, offset just past the closing 'e').
    """
    result = []
    f += 1
    while x[f] != 'e':
        item, f = decode_func[x[f]](x, f)
        result.append(item)
    return (result, f + 1)
|
||||
|
||||
def decode_dict(x, f):
    """Decode a bencoded dictionary ("d...e") with its 'd' at offset f.

    Keys must be strings in strictly increasing order; anything else
    raises ValueError.  Returns (dict, offset past the closing 'e').
    """
    r, f = {}, f+1
    lastkey = None
    while x[f] != 'e':
        k, f = decode_string(x, f)
        # under Python 2, None compares less than any string, so the
        # first key always passes this ordering check
        if lastkey >= k:
            raise ValueError
        lastkey = k
        r[k], f = decode_func[x[f]](x, f)
    return (r, f + 1)
|
||||
|
||||
# dispatch table: first character of an encoded value -> decoder.
# Strings begin with their decimal length, hence the ten digit entries.
decode_func = {
    'l': decode_list,
    'd': decode_dict,
    'i': decode_int,
}
for _digit in '0123456789':
    decode_func[_digit] = decode_string
del _digit
|
||||
|
||||
def bdecode(x):
    """Decode one complete bencoded value from string x.

    Raises BTFailure for malformed input or trailing garbage.
    """
    try:
        r, l = decode_func[x[0]](x, 0)
    except (IndexError, KeyError, ValueError):
        raise BTFailure, _("not a valid bencoded string")
    # the value must consume the entire input
    if l != len(x):
        raise BTFailure, _("invalid bencoded value (data after valid prefix)")
    return r
|
||||
|
||||
from types import StringType, IntType, LongType, DictType, ListType, TupleType
|
||||
|
||||
|
||||
class Bencached(object):
    # wrapper marking a value as already-bencoded text; the encoder
    # copies .bencoded into the output verbatim

    __slots__ = ['bencoded']

    def __init__(self, s):
        self.bencoded = s

def encode_bencached(x,r):
    r.append(x.bencoded)

def encode_int(x, r):
    r.extend(('i', str(x), 'e'))

def encode_string(x, r):
    r.extend((str(len(x)), ':', x))

def encode_list(x, r):
    r.append('l')
    for i in x:
        encode_func[type(i)](i, r)
    r.append('e')

def encode_dict(x,r):
    # keys are emitted in sorted order, as the bencoding spec requires
    r.append('d')
    ilist = x.items()
    ilist.sort()
    for k, v in ilist:
        r.extend((str(len(k)), ':', k))
        encode_func[type(v)](v, r)
    r.append('e')

# dispatch table: python type -> encoder function
encode_func = {}
encode_func[Bencached] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict

try:
    from types import BooleanType
    # booleans are encoded as the integers 0/1
    encode_func[BooleanType] = encode_int
except ImportError:
    # Python < 2.3 has no bool type
    pass

def bencode(x):
    """Bencode any supported python value and return the result string."""
    r = []
    encode_func[type(x)](x, r)
    return ''.join(r)
|
77
BitTorrent/bitfield.py
Executable file
77
BitTorrent/bitfield.py
Executable file
@ -0,0 +1,77 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen, Uoti Urpala, and John Hoffman
|
||||
|
||||
from array import array
|
||||
|
||||
from BitTorrent.obsoletepythonsupport import *
|
||||
|
||||
counts = [chr(sum([(i >> j) & 1 for j in xrange(8)])) for i in xrange(256)]
|
||||
counts = ''.join(counts)
|
||||
|
||||
|
||||
class Bitfield:
    """Fixed-length bit array tracking which pieces a peer has.

    Stored as an array of bytes, most significant bit first.  As an
    optimization, once every bit is set (numfalse == 0) the byte array
    is dropped and replaced by None; __getitem__ then answers 1 for
    every index without touching memory.
    """

    def __init__(self, length, bitstring=None):
        self.length = length
        rlen, extra = divmod(length, 8)
        if bitstring is None:
            # start with nothing: all bits false
            self.numfalse = length
            if extra:
                self.bits = array('B', chr(0) * (rlen + 1))
            else:
                self.bits = array('B', chr(0) * rlen)
        else:
            if extra:
                if len(bitstring) != rlen + 1:
                    raise ValueError
                # padding bits beyond `length` must be zero
                if (ord(bitstring[-1]) << extra) & 0xFF != 0:
                    raise ValueError
            else:
                if len(bitstring) != rlen:
                    raise ValueError
            # count set bits with the 256-entry popcount translation
            # table; fix: dropped the unused local alias `c = counts`
            self.numfalse = length - sum(array('B',
                                               bitstring.translate(counts)))
            if self.numfalse != 0:
                self.bits = array('B', bitstring)
            else:
                self.bits = None

    def __setitem__(self, index, val):
        # only setting a bit to true is supported
        assert val
        pos = index >> 3
        mask = 128 >> (index & 7)
        if self.bits[pos] & mask:
            return
        self.bits[pos] |= mask
        self.numfalse -= 1
        if self.numfalse == 0:
            self.bits = None

    def __getitem__(self, index):
        bits = self.bits
        if bits is None:
            # complete bitfield: every bit reads as set
            return 1
        return bits[index >> 3] & 128 >> (index & 7)

    def __len__(self):
        return self.length

    def tostring(self):
        """Serialize to the wire format (padding bits zero)."""
        if self.bits is None:
            # complete bitfield: synthesize all-ones with zero padding
            rlen, extra = divmod(self.length, 8)
            r = chr(0xFF) * rlen
            if extra:
                r += chr((0xFF << (8 - extra)) & 0xFF)
            return r
        else:
            return self.bits.tostring()
|
140
BitTorrent/btformats.py
Executable file
140
BitTorrent/btformats.py
Executable file
@ -0,0 +1,140 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen
|
||||
|
||||
import re
|
||||
|
||||
from BitTorrent import BTFailure
|
||||
|
||||
allowed_path_re = re.compile(r'^[^/\\.~][^/\\]*$')
|
||||
|
||||
ints = (long, int)
|
||||
|
||||
def check_info(info, check_paths=True):
    """Validate the 'info' dictionary of a torrent's metainfo.

    Checks types and value ranges of every required key and rejects
    path components that could escape the download directory.  Raises
    BTFailure on the first problem found.
    """
    if type(info) != dict:
        raise BTFailure, _("bad metainfo - not a dictionary")
    pieces = info.get('pieces')
    # pieces is a concatenation of 20-byte SHA1 digests
    if type(pieces) != str or len(pieces) % 20 != 0:
        raise BTFailure, _("bad metainfo - bad pieces key")
    piecelength = info.get('piece length')
    if type(piecelength) not in ints or piecelength <= 0:
        raise BTFailure, _("bad metainfo - illegal piece length")
    name = info.get('name')
    if type(name) != str:
        raise BTFailure, _("bad metainfo - bad name")
    if not allowed_path_re.match(name):
        raise BTFailure, _("name %s disallowed for security reasons") % name
    # exactly one of 'files' (multi-file) and 'length' (single-file)
    if info.has_key('files') == info.has_key('length'):
        raise BTFailure, _("single/multiple file mix")
    if info.has_key('length'):
        length = info.get('length')
        if type(length) not in ints or length < 0:
            raise BTFailure, _("bad metainfo - bad length")
    else:
        files = info.get('files')
        if type(files) != list:
            raise BTFailure, _('bad metainfo - "files" is not a list of files')
        for f in files:
            if type(f) != dict:
                raise BTFailure, _("bad metainfo - bad file value")
            length = f.get('length')
            if type(length) not in ints or length < 0:
                raise BTFailure, _("bad metainfo - bad length")
            path = f.get('path')
            if type(path) != list or path == []:
                raise BTFailure, _("bad metainfo - bad path")
            for p in path:
                if type(p) != str:
                    raise BTFailure, _("bad metainfo - bad path dir")
                if check_paths and not allowed_path_re.match(p):
                    raise BTFailure, _("path %s disallowed for security reasons") % p
        # after sorting, duplicates and file/directory clashes are adjacent
        f = ['/'.join(x['path']) for x in files]
        f.sort()
        i = iter(f)
        try:
            name2 = i.next()
            while True:
                name1 = name2
                name2 = i.next()
                if name2.startswith(name1):
                    if name1 == name2:
                        raise BTFailure, _("bad metainfo - duplicate path")
                    elif name2[len(name1)] == '/':
                        raise BTFailure(_("bad metainfo - name used as both"
                                          "file and subdirectory name"))
        except StopIteration:
            pass
|
||||
|
||||
def check_message(message, check_paths=True):
    """Validate a complete metainfo dictionary (info + tracker keys)."""
    if type(message) != dict:
        raise BTFailure, _("bad metainfo - wrong object type")
    check_info(message.get('info'), check_paths)
    # either a tracker announce URL or a trackerless DHT node list is required
    if type(message.get('announce')) != str and type(message.get('nodes')) != list:
        raise BTFailure, _("bad metainfo - no announce URL string")
    if message.has_key('nodes'):
        check_nodes(message.get('nodes'))
|
||||
|
||||
def check_nodes(nodes):
    """Validate a trackerless-torrent DHT node list: [[host, port], ...]."""
    ## note, these strings need changing
    for node in nodes:
        if type(node) != list:
            raise BTFailure, _("bad metainfo - wrong object type") + "0"
        if len(node) != 2:
            raise BTFailure, _("bad metainfo - wrong object type") + "1"
        host, port = node
        if type(host) != str:
            raise BTFailure, _("bad metainfo - wrong object type") + "2"
        if type(port) != int:
            raise BTFailure, _("bad metainfo - wrong object type") + "3"
|
||||
|
||||
def check_peers(message):
|
||||
if type(message) != dict:
|
||||
raise BTFailure
|
||||
if message.has_key('failure reason'):
|
||||
if type(message['failure reason']) != str:
|
||||
raise BTFailure, _("non-text failure reason")
|
||||
return
|
||||
if message.has_key('warning message'):
|
||||
if type(message['warning message']) != str:
|
||||
raise BTFailure, _("non-text warning message")
|
||||
peers = message.get('peers')
|
||||
if type(peers) == list:
|
||||
for p in peers:
|
||||
if type(p) != dict:
|
||||
raise BTFailure, _("invalid entry in peer list1")
|
||||
if type(p.get('ip')) != str:
|
||||
raise BTFailure, _("invalid entry in peer list2")
|
||||
port = p.get('port')
|
||||
if type(port) not in ints or p <= 0:
|
||||
raise BTFailure, _("invalid entry in peer list3")
|
||||
if p.has_key('peer id'):
|
||||
peerid = p.get('peer id')
|
||||
if type(peerid) != str or len(peerid) != 20:
|
||||
raise BTFailure, _("invalid entry in peer list4")
|
||||
elif type(peers) != str or len(peers) % 6 != 0:
|
||||
raise BTFailure, _("invalid peer list")
|
||||
interval = message.get('interval', 1)
|
||||
if type(interval) not in ints or interval <= 0:
|
||||
raise BTFailure, _("invalid announce interval")
|
||||
minint = message.get('min interval', 1)
|
||||
if type(minint) not in ints or minint <= 0:
|
||||
raise BTFailure, _("invalid min announce interval")
|
||||
if type(message.get('tracker id', '')) != str:
|
||||
raise BTFailure, _("invalid tracker id")
|
||||
npeers = message.get('num peers', 0)
|
||||
if type(npeers) not in ints or npeers < 0:
|
||||
raise BTFailure, _("invalid peer count")
|
||||
dpeers = message.get('done peers', 0)
|
||||
if type(dpeers) not in ints or dpeers < 0:
|
||||
raise BTFailure, _("invalid seed count")
|
||||
last = message.get('last', 0)
|
||||
if type(last) not in ints or last < 0:
|
||||
raise BTFailure, _('invalid "last" entry')
|
217
BitTorrent/configfile.py
Executable file
217
BitTorrent/configfile.py
Executable file
@ -0,0 +1,217 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Uoti Urpala and Matt Chisholm
|
||||
|
||||
import os
|
||||
import sys
|
||||
import gettext
|
||||
import locale
|
||||
|
||||
# Python 2.2 doesn't have RawConfigParser
|
||||
try:
|
||||
from ConfigParser import RawConfigParser
|
||||
except ImportError:
|
||||
from ConfigParser import ConfigParser as RawConfigParser
|
||||
|
||||
from ConfigParser import MissingSectionHeaderError, ParsingError
|
||||
from BitTorrent import parseargs
|
||||
from BitTorrent import app_name, version, ERROR, BTFailure
|
||||
from BitTorrent.platform import get_config_dir, locale_root, is_frozen_exe
|
||||
from BitTorrent.defaultargs import MYTRUE
|
||||
|
||||
TORRENT_CONFIG_FILE = 'torrent_config'
|
||||
|
||||
alt_uiname = {'bittorrent':'btdownloadgui',
|
||||
'maketorrent':'btmaketorrentgui',}
|
||||
|
||||
def _read_config(filename):
    """Read *filename* into a RawConfigParser, tolerating bad files.

    A missing file yields an empty parser.  Windows frequently leaves
    corrupt config files behind; on a parse error the broken file is
    renamed aside via bad_config() and an empty parser is returned.
    """
    p = RawConfigParser()
    fp = None
    try:
        fp = open(filename)
    except IOError:
        # no config yet: return the empty parser
        pass

    if fp is not None:
        try:
            p.readfp(fp, filename=filename)
        except (MissingSectionHeaderError, ParsingError):
            # fix: the two except branches were identical; merged them
            fp.close()
            bad_config(filename)
        else:
            fp.close()
    return p
|
||||
|
||||
|
||||
def _write_config(error_callback, filename, p):
    """Write parser *p* to *filename*; report failures via error_callback."""
    try:
        f = file(filename, 'w')
        p.write(f)
        f.close()
    except Exception, e:
        # best effort: close whatever was opened, then report
        try:
            f.close()
        except:
            pass
        error_callback(ERROR, _("Could not permanently save options: ")+
                       str(e))
|
||||
|
||||
|
||||
def bad_config(filename):
    """Move a corrupt config file out of the way.

    Renames *filename* to the first free name among
    "<filename>.broken", "<filename>.broken0", "<filename>.broken1",
    ... and reports the problem on stderr.
    """
    base = filename + '.broken'
    candidate = base
    suffix = 0
    while os.access(candidate, os.F_OK):
        candidate = base + str(suffix)
        suffix += 1
    os.rename(filename, candidate)
    sys.stderr.write(("Error reading config file. "
                      "Old config file stored in \"%s\"") % candidate)
|
||||
|
||||
|
||||
def get_config(defaults, section):
    """Load saved option values for *section* from ~/.bittorrent/config.

    Section-specific values win over the 'common' section.  Also picks
    a default data_dir inside the config dir when none is configured.
    Returns the dict of values (already validated by parse_options).
    """
    dir_root = get_config_dir()

    if dir_root is None:
        return {}

    configdir = os.path.join(dir_root, '.bittorrent')

    if not os.path.isdir(configdir):
        try:
            os.mkdir(configdir, 0700)
        except:
            # can't create the config dir; fall through and read nothing
            pass

    p = _read_config(os.path.join(configdir, 'config'))
    values = {}
    if p.has_section(section):
        for name, value in p.items(section):
            if name in defaults:
                values[name] = value
    # 'common' supplies fallbacks without overriding section values
    if p.has_section('common'):
        for name, value in p.items('common'):
            if name in defaults and name not in values:
                values[name] = value
    if defaults.get('data_dir') == '' and \
       'data_dir' not in values and os.path.isdir(configdir):
        datadir = os.path.join(configdir, 'data')
        values['data_dir'] = datadir
    parseargs.parse_options(defaults, values)
    return values
|
||||
|
||||
|
||||
def save_ui_config(defaults, section, save_options, error_callback):
    """Persist the listed options for *section* into data_dir/ui_config.

    The section is rewritten wholesale.  Unknown option names indicate
    a broken installation and are reported via error_callback.
    """
    filename = os.path.join(defaults['data_dir'], 'ui_config')
    p = _read_config(filename)
    # replace the whole section with the current values
    p.remove_section(section)
    p.add_section(section)
    for name in save_options:
        if defaults.has_key(name):
            p.set(section, name, defaults[name])
        else:
            err_str = "Configuration option mismatch: '%s'" % name
            if is_frozen_exe:
                err_str = "You must quit %s and reinstall it. (%s)" % (app_name, err_str)
            error_callback(ERROR, err_str)
    _write_config(error_callback, filename, p)
|
||||
|
||||
|
||||
def save_torrent_config(path, infohash, config, error_callback):
    """Persist per-torrent options, keyed by the hex infohash section."""
    section = infohash.encode('hex')
    filename = os.path.join(path, TORRENT_CONFIG_FILE)
    p = _read_config(filename)
    # replace the torrent's section wholesale
    p.remove_section(section)
    p.add_section(section)
    for key, value in config.items():
        p.set(section, key, value)
    _write_config(error_callback, filename, p)
|
||||
|
||||
def read_torrent_config(global_config, path, infohash, error_callback):
    """Load per-torrent options saved by save_torrent_config().

    Values are coerced to the type of the corresponding global option;
    unknown names are dropped.  Returns {} if nothing was saved.
    """
    section = infohash.encode('hex')
    filename = os.path.join(path, TORRENT_CONFIG_FILE)
    p = _read_config(filename)
    if not p.has_section(section):
        return {}
    else:
        c = {}
        for name, value in p.items(section):
            if global_config.has_key(name):
                t = type(global_config[name])
                if t == bool:
                    # booleans were stringified on save; recognize the forms
                    c[name] = value in ('1', 'True', MYTRUE, True)
                else:
                    c[name] = type(global_config[name])(value)
        return c
|
||||
|
||||
def remove_torrent_config(path, infohash, error_callback):
    """Delete a torrent's saved option section, if present."""
    section = infohash.encode('hex')
    filename = os.path.join(path, TORRENT_CONFIG_FILE)
    p = _read_config(filename)
    if p.has_section(section):
        p.remove_section(section)
    _write_config(error_callback, filename, p)
|
||||
|
||||
def parse_configuration_and_args(defaults, uiname, arglist=[], minargs=0,
                                 maxargs=0):
    """Build the effective config for *uiname* and parse its arguments.

    Merges, in increasing priority: built-in defaults, saved global
    config, saved UI config from data_dir, and command-line arguments.
    Also creates the data directories and installs the configured
    gettext translation.  Returns (config_dict, positional_args).
    (arglist has a mutable default, but it is only read, never mutated.)
    """
    defconfig = dict([(name, value) for (name, value, doc) in defaults])
    if arglist[0:] == ['--version']:
        print version
        sys.exit(0)

    if arglist[0:] in (['--help'], ['-h'], ['--usage'], ['-?']):
        parseargs.printHelp(uiname, defaults)
        sys.exit(0)

    presets = get_config(defconfig, uiname)
    config, args = parseargs.parseargs(arglist, defaults, minargs, maxargs,
                                       presets)
    datadir = config['data_dir']
    if datadir:
        if uiname in ('bittorrent', 'maketorrent'):
            values = {}
            p = _read_config(os.path.join(datadir, 'ui_config'))
            # older releases saved under a different UI name; fall back
            if not p.has_section(uiname) and p.has_section(alt_uiname[uiname]):
                uiname = alt_uiname[uiname]
            if p.has_section(uiname):
                for name, value in p.items(uiname):
                    if name in defconfig:
                        values[name] = value
            parseargs.parse_options(defconfig, values)
            presets.update(values)
            # re-parse so command-line args still win over saved values
            config, args = parseargs.parseargs(arglist, defaults, minargs,
                                               maxargs, presets)

        # make sure the data directories exist
        for d in ('', 'resume', 'metainfo'):
            ddir = os.path.join(datadir, d)
            try:
                if not os.path.exists(ddir):
                    os.mkdir(ddir, 0700)
            except:
                pass

    if config['language'] != '':
        try:
            lang = gettext.translation('bittorrent', locale_root,
                                       languages=[config['language']])
            lang.install()
        except IOError:
            # don't raise an error, just continue untranslated
            sys.stderr.write('Could not find translation for language "%s"\n' %
                             config['language'])

    return config, args
|
312
BitTorrent/controlsocket.py
Executable file
312
BitTorrent/controlsocket.py
Executable file
@ -0,0 +1,312 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written my Uoti Urpala
|
||||
from __future__ import generators
|
||||
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
if sys.platform.startswith('win'):
|
||||
import win32api
|
||||
import win32event
|
||||
import winerror
|
||||
|
||||
from binascii import b2a_hex
|
||||
|
||||
from BitTorrent.RawServer_magic import RawServer, Handler
|
||||
from BitTorrent.platform import get_home_dir, get_config_dir
|
||||
from BitTorrent import BTFailure, app_name
|
||||
|
||||
def toint(s):
    """Interpret a big-endian byte string as an unsigned integer."""
    return int(b2a_hex(s), 16)
|
||||
|
||||
def tobinary(i):
    """Pack integer *i* into 4 big-endian bytes (as a character string)."""
    return ''.join([chr(i >> 24),
                    chr((i >> 16) & 0xFF),
                    chr((i >> 8) & 0xFF),
                    chr(i & 0xFF)])
|
||||
|
||||
CONTROL_SOCKET_PORT = 46881
|
||||
|
||||
class ControlsocketListener(Handler):
    # accepts incoming control connections and attaches a
    # MessageReceiver parser to each one

    def __init__(self, callback):
        self.callback = callback

    def connection_made(self, connection):
        connection.handler = MessageReceiver(self.callback)
|
||||
|
||||
|
||||
class MessageReceiver(Handler):
    """Decodes length-prefixed control messages from a connection.

    The wire format is a sequence of (4-byte big-endian length, payload)
    fields.  Depending on the action name, zero, one or two additional
    fields follow, and ``callback`` is invoked with the decoded pieces.
    """

    def __init__(self, callback):
        self.callback = callback
        self._buffer = []
        self._buffer_len = 0
        # Generator-based parser: each yield states how many bytes it
        # needs next; the parsed bytes arrive via self._message.
        self._reader = self._read_messages()
        self._next_len = self._reader.next()

    def _read_messages(self):
        while True:
            yield 4
            l = toint(self._message)
            yield l
            action = self._message

            if action in ('no-op',):
                self.callback(action, None)
            else:
                yield 4
                l = toint(self._message)
                yield l
                data = self._message
                if action in ('show_error',):
                    self.callback(action, data)
                else:
                    yield 4
                    l = toint(self._message)
                    yield l
                    path = self._message
                    # BUG FIX: the original wrote "action in ('start_torrent')",
                    # where the parentheses do NOT make a tuple -- that is a
                    # substring test against the string 'start_torrent'.
                    # A one-element tuple gives the intended membership test.
                    if action in ('start_torrent',):
                        self.callback(action, data, path)

    # copied from Connecter.py
    def data_came_in(self, conn, s):
        """Feed raw bytes into the parser, buffering partial fields."""
        while True:
            i = self._next_len - self._buffer_len
            if i > len(s):
                # Not enough data for the next field yet; stash and wait.
                self._buffer.append(s)
                self._buffer_len += len(s)
                return
            m = s[:i]
            if self._buffer_len > 0:
                self._buffer.append(m)
                m = ''.join(self._buffer)
                self._buffer = []
                self._buffer_len = 0
            s = s[i:]
            self._message = m
            try:
                self._next_len = self._reader.next()
            except StopIteration:
                # Parser finished: drop it and close the connection.
                self._reader = None
                conn.close()
                return

    def connection_lost(self, conn):
        # Drop the parser so no further data is processed.
        self._reader = None

    def connection_flushed(self, conn):
        pass
|
||||
|
||||
|
||||
class ControlSocket(object):
|
||||
|
||||
def __init__(self, config):
|
||||
self.port = CONTROL_SOCKET_PORT
|
||||
self.mutex = None
|
||||
self.master = 0
|
||||
|
||||
self.socket_filename = os.path.join(config['data_dir'], 'ui_socket')
|
||||
|
||||
self.rawserver = None
|
||||
self.controlsocket = None
|
||||
|
||||
def set_rawserver(self, rawserver):
|
||||
self.rawserver = rawserver
|
||||
|
||||
def start_listening(self, callback):
|
||||
self.rawserver.start_listening(self.controlsocket,
|
||||
ControlsocketListener(callback))
|
||||
|
||||
def create_socket_inet(self, port = CONTROL_SOCKET_PORT):
|
||||
|
||||
try:
|
||||
controlsocket = RawServer.create_serversocket(port,
|
||||
'127.0.0.1', reuse=True)
|
||||
except socket.error, e:
|
||||
raise BTFailure(_("Could not create control socket: ")+str(e))
|
||||
|
||||
self.controlsocket = controlsocket
|
||||
|
||||
## def send_command_inet(self, rawserver, action, data = ''):
|
||||
## r = MessageReceiver(lambda action, data: None)
|
||||
## try:
|
||||
## conn = rawserver.start_connection(('127.0.0.1', CONTROL_SOCKET_PORT), r)
|
||||
## except socket.error, e:
|
||||
## raise BTFailure(_("Could not send command: ") + str(e))
|
||||
## conn.write(tobinary(len(action)))
|
||||
## conn.write(action)
|
||||
## conn.write(tobinary(len(data)))
|
||||
## conn.write(data)
|
||||
|
||||
#blocking version without rawserver
|
||||
def send_command_inet(self, action, *datas):
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
try:
|
||||
s.connect(('127.0.0.1', self.port))
|
||||
s.send(tobinary(len(action)))
|
||||
s.send(action)
|
||||
for data in datas:
|
||||
s.send(tobinary(len(data)))
|
||||
s.send(data)
|
||||
s.close()
|
||||
except socket.error, e:
|
||||
try:
|
||||
s.close()
|
||||
except:
|
||||
pass
|
||||
raise BTFailure(_("Could not send command: ") + str(e))
|
||||
|
||||
def create_socket_unix(self):
|
||||
filename = self.socket_filename
|
||||
if os.path.exists(filename):
|
||||
try:
|
||||
self.send_command_unix('no-op')
|
||||
except BTFailure:
|
||||
pass
|
||||
else:
|
||||
raise BTFailure(_("Could not create control socket: already in use"))
|
||||
|
||||
try:
|
||||
os.unlink(filename)
|
||||
except OSError, e:
|
||||
raise BTFailure(_("Could not remove old control socket filename:")
|
||||
+ str(e))
|
||||
try:
|
||||
controlsocket = RawServer.create_unixserversocket(filename)
|
||||
except socket.error, e:
|
||||
raise BTFailure(_("Could not create control socket: ")+str(e))
|
||||
|
||||
self.controlsocket = controlsocket
|
||||
|
||||
## def send_command_unix(self, rawserver, action, data = ''):
|
||||
## s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
## filename = self.socket_filename
|
||||
## try:
|
||||
## s.connect(filename)
|
||||
## except socket.error, e:
|
||||
## raise BTFailure(_("Could not send command: ") + str(e))
|
||||
## r = MessageReceiver(lambda action, data: None)
|
||||
## conn = rawserver.wrap_socket(s, r, ip = s.getpeername())
|
||||
## conn.write(tobinary(len(action)))
|
||||
## conn.write(action)
|
||||
## conn.write(tobinary(len(data)))
|
||||
## conn.write(data)
|
||||
|
||||
# blocking version without rawserver
|
||||
def send_command_unix(self, action, *datas):
|
||||
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
filename = self.socket_filename
|
||||
try:
|
||||
s.connect(filename)
|
||||
s.send(tobinary(len(action)))
|
||||
s.send(action)
|
||||
for data in datas:
|
||||
s.send(tobinary(len(data)))
|
||||
s.send(data)
|
||||
s.close()
|
||||
except socket.error, e:
|
||||
s.close()
|
||||
raise BTFailure(_("Could not send command: ") + str(e))
|
||||
|
||||
def close_socket(self):
|
||||
self.rawserver.stop_listening(self.controlsocket)
|
||||
self.controlsocket.close()
|
||||
|
||||
def get_sic_path(self):
|
||||
directory = get_config_dir()
|
||||
configdir = os.path.join(directory, '.bittorrent')
|
||||
filename = os.path.join(configdir, ".btcontrol")
|
||||
return filename
|
||||
|
||||
def create_sic_socket(self):
|
||||
obtain_mutex = 1
|
||||
mutex = win32event.CreateMutex(None, obtain_mutex, app_name)
|
||||
|
||||
# prevent the PyHANDLE from going out of scope, ints are fine
|
||||
self.mutex = int(mutex)
|
||||
mutex.Detach()
|
||||
|
||||
lasterror = win32api.GetLastError()
|
||||
|
||||
if lasterror == winerror.ERROR_ALREADY_EXISTS:
|
||||
raise BTFailure(_("Global mutex already created."))
|
||||
|
||||
self.master = 1
|
||||
|
||||
# where is the lower limit of the window random port pool? this should stop there
|
||||
port_limit = 50000
|
||||
while self.port < port_limit:
|
||||
try:
|
||||
self.create_socket_inet(self.port)
|
||||
break
|
||||
except BTFailure:
|
||||
self.port += 1
|
||||
|
||||
if self.port >= port_limit:
|
||||
raise BTFailure(_("Could not find an open port!"))
|
||||
|
||||
filename = self.get_sic_path()
|
||||
(path, name) = os.path.split(filename)
|
||||
try:
|
||||
os.makedirs(path)
|
||||
except OSError, e:
|
||||
# 17 is dir exists
|
||||
if e.errno != 17:
|
||||
BTFailure(_("Could not create application data directory!"))
|
||||
f = open(filename, "w")
|
||||
f.write(str(self.port))
|
||||
f.close()
|
||||
|
||||
# we're done writing the control file, release the mutex so other instances can lock it and read the file
|
||||
# but don't destroy the handle until the application closes, so that the names mutex is still around
|
||||
win32event.ReleaseMutex(self.mutex)
|
||||
|
||||
def discover_sic_socket(self):
|
||||
# mutex exists and has been opened (not created). wait for it so we can read the file
|
||||
r = win32event.WaitForSingleObject(self.mutex, win32event.INFINITE)
|
||||
|
||||
# WAIT_OBJECT_0 means the mutex was obtained
|
||||
# WAIT_ABANDONED means the mutex was obtained, and it had previously been abandoned
|
||||
if (r != win32event.WAIT_OBJECT_0) and (r != win32event.WAIT_ABANDONED):
|
||||
BTFailure(_("Could not acquire global mutex lock for controlsocket file!"))
|
||||
|
||||
filename = self.get_sic_path()
|
||||
try:
|
||||
f = open(filename, "r")
|
||||
self.port = int(f.read())
|
||||
f.close()
|
||||
except:
|
||||
self.port = CONTROL_SOCKET_PORT
|
||||
if (r != win32event.WAIT_ABANDONED):
|
||||
sys.stderr.write(_("A previous instance of BT was not cleaned up properly. Continuing."))
|
||||
# what I should really do here is assume the role of master.
|
||||
|
||||
# we're done reading the control file, release the mutex so other instances can lock it and read the file
|
||||
win32event.ReleaseMutex(self.mutex)
|
||||
|
||||
def close_sic_socket(self):
|
||||
if self.master:
|
||||
r = win32event.WaitForSingleObject(self.mutex, win32event.INFINITE)
|
||||
filename = self.get_sic_path()
|
||||
os.remove(filename)
|
||||
self.master = 0
|
||||
win32event.ReleaseMutex(self.mutex)
|
||||
# close it so the named mutex goes away
|
||||
win32api.CloseHandle(self.mutex)
|
||||
self.mutex = None
|
||||
|
||||
if sys.platform.startswith('win'):
|
||||
send_command = send_command_inet
|
||||
create_socket = create_sic_socket
|
||||
else:
|
||||
send_command = send_command_unix
|
||||
create_socket = create_socket_unix
|
267
BitTorrent/defaultargs.py
Executable file
267
BitTorrent/defaultargs.py
Executable file
@ -0,0 +1,267 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
|
||||
# False and True are not distinct from 0 and 1 under Python 2.2,
|
||||
# and we want to handle boolean options differently.
|
||||
class MyBool(object):
    """Boolean wrapper kept distinct from ints, because under Python 2.2
    True/False are not a separate type and boolean options must be told
    apart from integer options."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        # Mirror the repr of real booleans.
        if not self.value:
            return 'False'
        return 'True'

    def __nonzero__(self):
        return self.value
|
||||
|
||||
# Module-level boolean singletons used as defaults in the option tables.
MYTRUE = MyBool(True)
MYFALSE = MyBool(False)
|
||||
|
||||
from BitTorrent import languages
|
||||
|
||||
# Options shown to every user interface.
basic_options = [
    ('data_dir', '',
     _("directory under which variable data such as fastresume information "
       "and GUI state is saved. Defaults to subdirectory 'data' of the "
       "bittorrent config directory.")),
    ('filesystem_encoding', '',
     _("character encoding used on the local filesystem. "
       "If left empty, autodetected. "
       "Autodetection doesn't work under python versions older than 2.3")),
    ('language', '',
     _("ISO Language code to use") + ': ' + ', '.join(languages)),
    ]

# Options shared by the downloading interfaces (bittorrent* and launchmany*).
common_options = [
    ('ip', '',
     _("ip to report to the tracker (has no effect unless you are on the same "
       "local network as the tracker)")),
    ('forwarded_port', 0,
     _("world-visible port number if it's different from the one the client "
       "listens on locally")),
    ('minport', 6881,
     _("minimum port to listen on, counts up if unavailable")),
    ('maxport', 6999,
     _("maximum port to listen on")),
    ('bind', '',
     _("ip to bind to locally")),
    ('display_interval', .5,
     _("seconds between updates of displayed information")),
    ('rerequest_interval', 5 * 60,
     _("minutes to wait between requesting more peers")),
    ('min_peers', 20,
     _("minimum number of peers to not do rerequesting")),
    ('max_initiate', 40,
     _("number of peers at which to stop initiating new connections")),
    ('max_allow_in', 80,
     _("maximum number of connections to allow, after this new incoming "
       "connections will be immediately closed")),
    ('check_hashes', MYTRUE,
     _("whether to check hashes on disk")),
    ('max_upload_rate', 20,
     _("maximum kB/s to upload at, 0 means no limit")),
    ('min_uploads', 2,
     _("the number of uploads to fill out to with extra optimistic unchokes")),
    ('max_files_open', 50,
     _("the maximum number of files in a multifile torrent to keep open at a "
       "time, 0 means no limit. Used to avoid running out of file descriptors.")),
    ('start_trackerless_client', MYTRUE,
     _("Initialize a trackerless client. This must be enabled in order to download trackerless torrents."))
    ]


# Options most users never need to touch.
rare_options = [
    ('keepalive_interval', 120.0,
     _("number of seconds to pause between sending keepalives")),
    ('download_slice_size', 2 ** 14,
     _("how many bytes to query for per request.")),
    ('max_message_length', 2 ** 23,
     _("maximum length prefix encoding you'll accept over the wire - larger "
       "values get the connection dropped.")),
    ('socket_timeout', 300.0,
     _("seconds to wait between closing sockets which nothing has been "
       "received on")),
    ('timeout_check_interval', 60.0,
     _("seconds to wait between checking if any connections have timed out")),
    ('max_slice_length', 16384,
     _("maximum length slice to send to peers, close connection if a larger "
       "request is received")),
    ('max_rate_period', 20.0,
     _("maximum time interval over which to estimate the current upload and download rates")),
    ('max_rate_period_seedtime', 100.0,
     _("maximum time interval over which to estimate the current seed rate")),
    ('max_announce_retry_interval', 1800,
     _("maximum time to wait between retrying announces if they keep failing")),
    ('snub_time', 30.0,
     _("seconds to wait for data to come in over a connection before assuming "
       "it's semi-permanently choked")),
    ('rarest_first_cutoff', 4,
     _("number of downloads at which to switch from random to rarest first")),
    ('upload_unit_size', 1380,
     _("how many bytes to write into network buffers at once.")),
    ('retaliate_to_garbled_data', MYTRUE,
     _("refuse further connections from addresses with broken or intentionally "
       "hostile peers that send incorrect data")),
    ('one_connection_per_ip', MYTRUE,
     _("do not connect to several peers that have the same IP address")),
    ('peer_socket_tos', 8,
     _("if nonzero, set the TOS option for peer connections to this value")),
    ('bad_libc_workaround', MYFALSE,
     _("enable workaround for a bug in BSD libc that makes file reads very slow.")),
    ('tracker_proxy', '',
     _("address of HTTP proxy to use for tracker connections")),
    ('close_with_rst', 0,
     _("close connections with RST and avoid the TCP TIME_WAIT state")),
    ('twisted', -1,
     _("Use Twisted network libraries for network connections. 1 means use twisted, 0 means do not use twisted, -1 means autodetect, and prefer twisted")),
    ]
|
||||
|
||||
|
||||
def get_defaults(ui):
    """Return the list of (name, default, description) option triples
    appropriate for the user interface named *ui*."""
    assert ui in ("bittorrent" , "bittorrent-curses", "bittorrent-console" ,
                  "maketorrent", "maketorrent-console",
                  "launchmany-curses", "launchmany-console" ,
                  )
    r = []

    if ui.startswith('bittorrent') or ui.startswith('launchmany'):
        r.extend(common_options)

    # Options specific to the GUI client.
    if ui == 'bittorrent':
        r.extend([
            ('save_as', '',
             _("file name (for single-file torrents) or directory name (for "
               "batch torrents) to save the torrent as, overriding the default "
               "name in the torrent. See also --save_in, if neither is "
               "specified the user will be asked for save location")),
            ('advanced', MYFALSE,
             _("display advanced user interface")),
            ('next_torrent_time', 300,
             _("the maximum number of minutes to seed a completed torrent "
               "before stopping seeding")),
            ('next_torrent_ratio', 80,
             _("the minimum upload/download ratio, in percent, to achieve "
               "before stopping seeding. 0 means no limit.")),
            ('last_torrent_ratio', 0,
             _("the minimum upload/download ratio, in percent, to achieve "
               "before stopping seeding the last torrent. 0 means no limit.")),
            ('seed_forever', MYFALSE,
             _("Seed each completed torrent indefinitely "
               "(until the user cancels it)")),
            ('seed_last_forever', MYTRUE,
             _("Seed the last torrent indefinitely "
               "(until the user cancels it)")),
            ('pause', MYFALSE,
             _("start downloader in paused state")),
            ('start_torrent_behavior', 'replace',
             _('specifies how the app should behave when the user manually '
               'tries to start another torrent: "replace" means always replace '
               'the running torrent with the new one, "add" means always add '
               'the running torrent in parallel, and "ask" means ask the user '
               'each time.')),
            ('open_from', '',
             'local directory to look in for .torrent files to open'),
            ('ask_for_save', MYFALSE,
             'whether or not to ask for a location to save downloaded files in'),
            ])

    if ui in ('bittorrent-console', 'bittorrent-curses'):
        r.append(
            ('save_as', '',
             _("file name (for single-file torrents) or directory name (for "
               "batch torrents) to save the torrent as, overriding the "
               "default name in the torrent. See also --save_in")))

    if ui.startswith('bittorrent'):
        r.extend([
            ('max_uploads', -1,
             _("the maximum number of uploads to allow at once. -1 means a "
               "(hopefully) reasonable number based on --max_upload_rate. "
               "The automatic values are only sensible when running one "
               "torrent at a time.")),
            ('save_in', '',
             _("local directory where the torrent contents will be saved. The "
               "file (single-file torrents) or directory (batch torrents) will "
               "be created under this directory using the default name "
               "specified in the .torrent file. See also --save_as.")),
            ('responsefile', '',
             _("deprecated, do not use")),
            ('url', '',
             _("deprecated, do not use")),
            ('ask_for_save', 0,
             _("whether or not to ask for a location to save downloaded files in")),
            ])

    # Options specific to the batch (launchmany) front-ends.
    if ui.startswith('launchmany'):
        r.extend([
            ('max_uploads', 6,
             _("the maximum number of uploads to allow at once. -1 means a "
               "(hopefully) reasonable number based on --max_upload_rate. The "
               "automatic values are only sensible when running one torrent at "
               "a time.")),
            ('save_in', '',
             _("local directory where the torrents will be saved, using a "
               "name determined by --saveas_style. If this is left empty "
               "each torrent will be saved under the directory of the "
               "corresponding .torrent file")),
            ('parse_dir_interval', 60,
             _("how often to rescan the torrent directory, in seconds") ),
            ('saveas_style', 4,
             _("How to name torrent downloads: "
               "1: use name OF torrent file (minus .torrent); "
               "2: use name encoded IN torrent file; "
               "3: create a directory with name OF torrent file "
               "(minus .torrent) and save in that directory using name "
               "encoded IN torrent file; "
               "4: if name OF torrent file (minus .torrent) and name "
               "encoded IN torrent file are identical, use that "
               "name (style 1/2), otherwise create an intermediate "
               "directory as in style 3; "
               "CAUTION: options 1 and 2 have the ability to "
               "overwrite files without warning and may present "
               "security issues."
               ) ),
            ('display_path', ui == 'launchmany-console' and MYTRUE or MYFALSE,
             _("whether to display the full path or the torrent contents for "
               "each torrent") ),
            ])

    if ui.startswith('launchmany') or ui == 'maketorrent':
        r.append(
            ('torrent_dir', '',
             _("directory to look for .torrent files (semi-recursive)")),)

    if ui in ('bittorrent-curses', 'bittorrent-console'):
        r.append(
            ('spew', MYFALSE,
             _("whether to display diagnostic info to stdout")))

    # Options specific to the torrent-creation tools.
    if ui.startswith('maketorrent'):
        r.extend([
            ('piece_size_pow2', 18,
             _("which power of two to set the piece size to")),
            ('tracker_name', 'http://my.tracker:6969/announce',
             _("default tracker name")),
            ('tracker_list', '', ''),
            ('use_tracker', MYTRUE,
             _("if false then make a trackerless torrent, instead of "
               "announce URL, use reliable node in form of <ip>:<port> or an "
               "empty string to pull some nodes from your routing table")),
            ])

    r.extend(basic_options)

    if ui.startswith('bittorrent') or ui.startswith('launchmany'):
        r.extend(rare_options)

    return r
|
56
BitTorrent/defer.py
Executable file
56
BitTorrent/defer.py
Executable file
@ -0,0 +1,56 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
class Deferred(object):
    """Minimal Twisted-style deferred: callbacks and errbacks are queued
    and fired against results as they arrive, in either order (a result
    may arrive before or after its callbacks are registered)."""

    def __init__(self):
        self.callbacks = []
        self.errbacks = []
        self.calledBack = False
        self.erredBack = False
        self.results = []
        self.failures = []

    def addCallback(self, cb, args=(), kwargs={}):
        """Register *cb* for success results; fires immediately against
        any results already delivered.  Returns self for chaining."""
        assert callable(cb)
        entry = (cb, args, kwargs)
        self.callbacks.append(entry)
        if self.calledBack:
            # Replay every stored result through just this new callback.
            self.doCallbacks(self.results, [entry])
        return self

    def addErrback(self, cb, args=(), kwargs={}):
        """Register *cb* for failures; fires immediately against any
        failures already delivered.  Returns self for chaining."""
        assert callable(cb)
        entry = (cb, args, kwargs)
        self.errbacks.append(entry)
        if self.erredBack:
            self.doCallbacks(self.failures, [entry])
        return self

    def addCallbacks(self, cb, eb, args=(), kwargs={},
                     ebargs=(), ebkwargs={}):
        """Register a success callback and an errback in one call."""
        assert callable(cb)
        assert callable(eb)
        self.addCallback(cb, args, kwargs)
        self.addErrback(eb, ebargs, ebkwargs)

    def callback(self, result):
        """Deliver a success *result* to all registered callbacks."""
        self.results.append(result)
        self.calledBack = True
        if self.callbacks:
            self.doCallbacks([result], self.callbacks)

    def errback(self, failed):
        """Deliver a failure *failed* to all registered errbacks."""
        self.failures.append(failed)
        self.erredBack = True
        if self.errbacks:
            self.doCallbacks([failed], self.errbacks)

    def doCallbacks(self, results, callbacks):
        """Feed each result through the callback chain, threading the
        return value of each callback into the next."""
        for value in results:
            for cb, args, kwargs in callbacks:
                value = cb(value, *args, **kwargs)
|
583
BitTorrent/download.py
Executable file
583
BitTorrent/download.py
Executable file
@ -0,0 +1,583 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen and Uoti Urpala
|
||||
|
||||
from __future__ import division
|
||||
# required for python 2.2
|
||||
from __future__ import generators
|
||||
|
||||
import os
|
||||
import sys
|
||||
import threading
|
||||
import errno
|
||||
import gc
|
||||
from sha import sha
|
||||
from socket import error as socketerror
|
||||
from random import seed
|
||||
from time import time
|
||||
from cStringIO import StringIO
|
||||
from traceback import print_exc
|
||||
from math import sqrt
|
||||
# os.getpid is missing on some minimal platforms; fall back to a
# constant dummy pid there.
try:
    getpid = os.getpid
except AttributeError:
    def getpid():
        return 1
|
||||
|
||||
from BitTorrent.btformats import check_message
|
||||
from BitTorrent.Choker import Choker
|
||||
from BitTorrent.Storage import Storage, FilePool
|
||||
from BitTorrent.StorageWrapper import StorageWrapper
|
||||
from BitTorrent.Uploader import Upload
|
||||
from BitTorrent.Downloader import Downloader
|
||||
from BitTorrent.Encoder import Encoder, SingleportListener
|
||||
|
||||
from BitTorrent.RateLimiter import MultiRateLimiter as RateLimiter
|
||||
from BitTorrent.RateLimiter import RateLimitedGroup
|
||||
|
||||
from BitTorrent.RawServer_magic import RawServer
|
||||
from BitTorrent.Rerequester import Rerequester, DHTRerequester
|
||||
from BitTorrent.DownloaderFeedback import DownloaderFeedback
|
||||
from BitTorrent.RateMeasure import RateMeasure
|
||||
from BitTorrent.CurrentRateMeasure import Measure
|
||||
from BitTorrent.PiecePicker import PiecePicker
|
||||
from BitTorrent.ConvertedMetainfo import set_filesystem_encoding
|
||||
from BitTorrent import version
|
||||
from BitTorrent import BTFailure, BTShutdown, INFO, WARNING, ERROR, CRITICAL
|
||||
|
||||
from khashmir.utkhashmir import UTKhashmir
|
||||
from khashmir import const
|
||||
|
||||
class Feedback(object):
    """Observer interface for torrent lifecycle notifications; every hook
    is a no-op by default so subclasses override only what they need."""

    def finished(self, torrent):
        """Notification hook; no-op by default."""

    def failed(self, torrent, is_external):
        """Notification hook; no-op by default."""

    def error(self, torrent, level, text):
        """Notification hook; no-op by default."""

    def exception(self, torrent, text):
        # Exceptions are routed through error() at CRITICAL severity.
        self.error(torrent, CRITICAL, text)

    def started(self, torrent):
        """Notification hook; no-op by default."""
|
||||
|
||||
|
||||
class Multitorrent(object):
    """Process-wide infrastructure (rawserver, rate limiter, file pool,
    optional DHT client) shared by all running torrents."""

    def __init__(self, config, doneflag, errorfunc, listen_fail_ok=False):
        self.dht = None
        self.config = config
        self.errorfunc = errorfunc
        self.rawserver = RawServer(doneflag, config, errorfunc=errorfunc,
                                   tos=config['peer_socket_tos'])
        self.singleport_listener = SingleportListener(self.rawserver)
        self.ratelimiter = RateLimiter(self.rawserver.add_task)
        self.ratelimiter.set_parameters(config['max_upload_rate'],
                                        config['upload_unit_size'])
        self._find_port(listen_fail_ok)
        self.filepool = FilePool(config['max_files_open'])
        set_filesystem_encoding(config['filesystem_encoding'],
                                errorfunc)


    def _find_port(self, listen_fail_ok=True):
        """Try each port in [minport, maxport] until one binds; on success
        optionally start the trackerless (DHT) client on that port."""
        # Default message in case the range is empty.
        e = _("maxport less than minport - no ports to check")
        if self.config['minport'] <= 0:
            self.config['minport'] = 1
        for port in xrange(self.config['minport'], self.config['maxport'] + 1):
            try:
                self.singleport_listener.open_port(port, self.config)
                if self.config['start_trackerless_client']:
                    self.dht = UTKhashmir(self.config['bind'],
                                          self.singleport_listener.get_port(),
                                          self.config['data_dir'], self.rawserver,
                                          int(self.config['max_upload_rate'] * 1024 * 0.01),
                                          rlcount=self.ratelimiter.increase_offset,
                                          config=self.config)
                break
            except socketerror, e:
                pass
        else:
            # Every port in the range failed: raise, or just report.
            if not listen_fail_ok:
                raise BTFailure, _("Could not open a listening port: %s.") % str(e)
            self.errorfunc(CRITICAL,
                           _("Could not open a listening port: %s. ") %
                           str(e) +
                           _("Check your port range settings."))

    def close_listening_socket(self):
        self.singleport_listener.close_sockets()

    def start_torrent(self, metainfo, config, feedback, filename):
        """Create a _SingleTorrent and schedule its startup on the
        rawserver; returns the torrent object immediately."""
        torrent = _SingleTorrent(self.rawserver, self.singleport_listener,
                                 self.ratelimiter, self.filepool, config, self.dht)
        torrent.rlgroup = RateLimitedGroup(config['max_upload_rate'], torrent.got_exception)
        self.rawserver.add_context(torrent)
        def start():
            torrent.start_download(metainfo, feedback, filename)
        self.rawserver.external_add_task(start, 0, context=torrent)
        return torrent

    def set_option(self, option, value):
        """Update one config value and propagate it to the affected
        subsystem (rate limiter, file pool, or listening port)."""
        self.config[option] = value
        if option in ['max_upload_rate', 'upload_unit_size']:
            self.ratelimiter.set_parameters(self.config['max_upload_rate'],
                                            self.config['upload_unit_size'])
        elif option == 'max_files_open':
            self.filepool.set_max_files_open(value)
        elif option == 'maxport':
            # Re-pick a port if the current one fell outside the new range.
            if not self.config['minport'] <= self.singleport_listener.port <= \
                   self.config['maxport']:
                self._find_port()

    def get_completion(self, config, metainfo, save_path, filelist=False):
        """Return the fastresume completion fraction for a torrent, or
        None if it cannot be determined.  With filelist=True, returns the
        tuple produced by check_fastresume (fraction first)."""
        if not config['data_dir']:
            return None
        infohash = metainfo.infohash
        if metainfo.is_batch:
            myfiles = [os.path.join(save_path, f) for f in metainfo.files_fs]
        else:
            myfiles = [save_path]

        if metainfo.total_bytes == 0:
            if filelist:
                return None
            return 1
        try:
            s = Storage(None, None, zip(myfiles, metainfo.sizes),
                        check_only=True)
        except:
            return None
        filename = os.path.join(config['data_dir'], 'resume',
                                infohash.encode('hex'))
        try:
            f = file(filename, 'rb')
        except:
            f = None
        try:
            r = s.check_fastresume(f, filelist, metainfo.piece_length,
                                   len(metainfo.hashes), myfiles)
        except:
            r = None
        if f is not None:
            f.close()
        if r is None:
            return None
        if filelist:
            return r[0] / metainfo.total_bytes, r[1], r[2]
        return r / metainfo.total_bytes
|
||||
|
||||
|
||||
class _SingleTorrent(object):
|
||||
|
||||
    def __init__(self, rawserver, singleport_listener, ratelimiter, filepool,
                 config, dht):
        """Record shared services and initialize per-torrent state; the
        heavy setup (storage, hash check) happens in _start_download."""
        self._rawserver = rawserver
        self._singleport_listener = singleport_listener
        self._ratelimiter = ratelimiter
        self._filepool = filepool
        self._dht = dht
        # Download machinery, created later during _start_download.
        self._storage = None
        self._storagewrapper = None
        self._ratemeasure = None
        self._upmeasure = None
        self._downmeasure = None
        self._encoder = None
        self._rerequest = None
        self._statuscollecter = None
        self._announced = False
        self._listening = False
        self.reserved_ports = []
        self.reported_port = None
        self._myfiles = None
        # Lifecycle flags.
        self.started = False
        self.is_seed = False
        self.closed = False
        self.infohash = None
        self.total_bytes = None
        self._doneflag = threading.Event()
        self.finflag = threading.Event()
        self._hashcheck_thread = None
        self._contfunc = None
        # (description, fraction_done) pair describing current activity.
        self._activity = (_("Initial startup"), 0)
        self.feedback = None
        self.errors = []
        self.rlgroup = None
        self.config = config
|
||||
|
||||
    def start_download(self, *args, **kwargs):
        """Drive the _start_download generator to completion, advancing it
        one step at a time via tasks scheduled on the rawserver."""
        it = self._start_download(*args, **kwargs)
        def cont():
            try:
                it.next()
            except StopIteration:
                # Generator finished; clear the continuation hook.
                self._contfunc = None
        def contfunc():
            # Resume the generator from the rawserver thread.
            self._rawserver.external_add_task(cont, 0, context=self)
        self._contfunc = contfunc
        contfunc()
|
||||
|
||||
def _start_download(self, metainfo, feedback, save_path):
|
||||
self.feedback = feedback
|
||||
config = self.config
|
||||
|
||||
self.infohash = metainfo.infohash
|
||||
self.total_bytes = metainfo.total_bytes
|
||||
if not metainfo.reported_errors:
|
||||
metainfo.show_encoding_errors(self._error)
|
||||
|
||||
myid = self._make_id()
|
||||
seed(myid)
|
||||
def schedfunc(func, delay):
|
||||
self._rawserver.add_task(func, delay, context=self)
|
||||
def externalsched(func, delay):
|
||||
self._rawserver.external_add_task(func, delay, context=self)
|
||||
if metainfo.is_batch:
|
||||
myfiles = [os.path.join(save_path, f) for f in metainfo.files_fs]
|
||||
else:
|
||||
myfiles = [save_path]
|
||||
self._filepool.add_files(myfiles, self)
|
||||
self._myfiles = myfiles
|
||||
self._storage = Storage(config, self._filepool, zip(myfiles,
|
||||
metainfo.sizes))
|
||||
resumefile = None
|
||||
if config['data_dir']:
|
||||
filename = os.path.join(config['data_dir'], 'resume',
|
||||
self.infohash.encode('hex'))
|
||||
if os.path.exists(filename):
|
||||
try:
|
||||
resumefile = file(filename, 'rb')
|
||||
if self._storage.check_fastresume(resumefile) == 0:
|
||||
resumefile.close()
|
||||
resumefile = None
|
||||
except Exception, e:
|
||||
self._error(WARNING,
|
||||
_("Could not load fastresume data: %s.") % str(e)
|
||||
+ ' ' + _("Will perform full hash check."))
|
||||
if resumefile is not None:
|
||||
resumefile.close()
|
||||
resumefile = None
|
||||
def data_flunked(amount, index):
|
||||
self._ratemeasure.data_rejected(amount)
|
||||
self._error(INFO,
|
||||
_("piece %d failed hash check, re-downloading it")
|
||||
% index)
|
||||
backthread_exception = []
|
||||
def errorfunc(level, text):
|
||||
def e():
|
||||
self._error(level, text)
|
||||
externalsched(e, 0)
|
||||
def hashcheck():
|
||||
def statusfunc(activity = None, fractionDone = 0):
|
||||
if activity is None:
|
||||
activity = self._activity[0]
|
||||
self._activity = (activity, fractionDone)
|
||||
try:
|
||||
self._storagewrapper = StorageWrapper(self._storage,
|
||||
config, metainfo.hashes, metainfo.piece_length,
|
||||
self._finished, statusfunc, self._doneflag, data_flunked,
|
||||
self.infohash, errorfunc, resumefile)
|
||||
except:
|
||||
backthread_exception.append(sys.exc_info())
|
||||
self._contfunc()
|
||||
thread = threading.Thread(target = hashcheck)
|
||||
thread.setDaemon(False)
|
||||
self._hashcheck_thread = thread
|
||||
thread.start()
|
||||
yield None
|
||||
self._hashcheck_thread = None
|
||||
if resumefile is not None:
|
||||
resumefile.close()
|
||||
if backthread_exception:
|
||||
a, b, c = backthread_exception[0]
|
||||
raise a, b, c
|
||||
|
||||
if self._storagewrapper.amount_left == 0:
|
||||
self._finished()
|
||||
choker = Choker(config, schedfunc, self.finflag.isSet)
|
||||
upmeasure = Measure(config['max_rate_period'])
|
||||
upmeasure_seedtime = Measure(config['max_rate_period_seedtime'])
|
||||
downmeasure = Measure(config['max_rate_period'])
|
||||
self._upmeasure = upmeasure
|
||||
self._upmeasure_seedtime = upmeasure_seedtime
|
||||
self._downmeasure = downmeasure
|
||||
self._ratemeasure = RateMeasure(self._storagewrapper.
|
||||
amount_left_with_partials)
|
||||
picker = PiecePicker(len(metainfo.hashes), config)
|
||||
for i in xrange(len(metainfo.hashes)):
|
||||
if self._storagewrapper.do_I_have(i):
|
||||
picker.complete(i)
|
||||
for i in self._storagewrapper.stat_dirty:
|
||||
picker.requested(i)
|
||||
def kickpeer(connection):
|
||||
def kick():
|
||||
connection.close()
|
||||
schedfunc(kick, 0)
|
||||
def banpeer(ip):
|
||||
self._encoder.ban(ip)
|
||||
downloader = Downloader(config, self._storagewrapper, picker,
|
||||
len(metainfo.hashes), downmeasure, self._ratemeasure.data_came_in,
|
||||
kickpeer, banpeer)
|
||||
def make_upload(connection):
|
||||
return Upload(connection, self._ratelimiter, upmeasure,
|
||||
upmeasure_seedtime, choker, self._storagewrapper,
|
||||
config['max_slice_length'], config['max_rate_period'])
|
||||
|
||||
|
||||
self.reported_port = self.config['forwarded_port']
|
||||
if not self.reported_port:
|
||||
self.reported_port = self._singleport_listener.get_port()
|
||||
self.reserved_ports.append(self.reported_port)
|
||||
|
||||
if self._dht:
|
||||
addContact = self._dht.addContact
|
||||
else:
|
||||
addContact = None
|
||||
self._encoder = Encoder(make_upload, downloader, choker,
|
||||
len(metainfo.hashes), self._ratelimiter, self._rawserver,
|
||||
config, myid, schedfunc, self.infohash, self, addContact, self.reported_port)
|
||||
|
||||
self._singleport_listener.add_torrent(self.infohash, self._encoder)
|
||||
self._listening = True
|
||||
if metainfo.is_trackerless:
|
||||
if not self._dht:
|
||||
self._error(self, CRITICAL, _("Attempt to download a trackerless torrent with trackerless client turned off."))
|
||||
return
|
||||
else:
|
||||
if len(self._dht.table.findNodes(metainfo.infohash, invalid=False)) < const.K:
|
||||
for host, port in metainfo.nodes:
|
||||
self._dht.addContact(host, port)
|
||||
self._rerequest = DHTRerequester(config,
|
||||
schedfunc, self._encoder.how_many_connections,
|
||||
self._encoder.start_connection, externalsched,
|
||||
self._storagewrapper.get_amount_left, upmeasure.get_total,
|
||||
downmeasure.get_total, self.reported_port, myid,
|
||||
self.infohash, self._error, self.finflag, upmeasure.get_rate,
|
||||
downmeasure.get_rate, self._encoder.ever_got_incoming,
|
||||
self.internal_shutdown, self._announce_done, self._dht)
|
||||
else:
|
||||
self._rerequest = Rerequester(metainfo.announce, config,
|
||||
schedfunc, self._encoder.how_many_connections,
|
||||
self._encoder.start_connection, externalsched,
|
||||
self._storagewrapper.get_amount_left, upmeasure.get_total,
|
||||
downmeasure.get_total, self.reported_port, myid,
|
||||
self.infohash, self._error, self.finflag, upmeasure.get_rate,
|
||||
downmeasure.get_rate, self._encoder.ever_got_incoming,
|
||||
self.internal_shutdown, self._announce_done)
|
||||
|
||||
self._statuscollecter = DownloaderFeedback(choker, upmeasure.get_rate,
|
||||
upmeasure_seedtime.get_rate, downmeasure.get_rate,
|
||||
upmeasure.get_total, downmeasure.get_total,
|
||||
self._ratemeasure.get_time_left, self._ratemeasure.get_size_left,
|
||||
self.total_bytes, self.finflag, downloader, self._myfiles,
|
||||
self._encoder.ever_got_incoming, self._rerequest)
|
||||
|
||||
self._announced = True
|
||||
if self._dht and len(self._dht.table.findNodes(self.infohash)) == 0:
|
||||
self._rawserver.add_task(self._dht.findCloseNodes, 5)
|
||||
self._rawserver.add_task(self._rerequest.begin, 20)
|
||||
else:
|
||||
self._rerequest.begin()
|
||||
self.started = True
|
||||
if not self.finflag.isSet():
|
||||
self._activity = (_("downloading"), 0)
|
||||
self.feedback.started(self)
|
||||
|
||||
    def got_exception(self, e):
        """Handle a fatal exception for this torrent: report, close, clean up.

        BTShutdown is treated as an externally requested stop; everything
        else marks the torrent as failed and removes stale fastresume data.
        """
        is_external = False
        if isinstance(e, BTShutdown):
            self._error(ERROR, str(e))
            is_external = True
        elif isinstance(e, BTFailure):
            self._error(CRITICAL, str(e))
            self._activity = ( _("download failed: ") + str(e), 0)
        elif isinstance(e, IOError):
            msg = 'IO Error ' + str(e)
            if e.errno == errno.ENOSPC:
                # Give a clearer message for the most common disk failure.
                msg = _("IO Error: No space left on disk, "
                        "or cannot create a file that large:") + str(e)
            self._error(CRITICAL, msg)
            self._activity = (_("killed by IO error: ") + str(e), 0)
        elif isinstance(e, OSError):
            self._error(CRITICAL, 'OS Error ' + str(e))
            self._activity = (_("killed by OS error: ") + str(e), 0)
        else:
            # Unknown exception type: capture the full traceback and report
            # it through the exception channel (exception=True).
            data = StringIO()
            print_exc(file=data)
            self._error(CRITICAL, data.getvalue(), True)
            self._activity = (_("killed by internal exception: ") + str(e), 0)
        try:
            self._close()
        except Exception, e:
            self._error(ERROR,
                        _("Additional error when closing down due to error: ") +
                        str(e))
        if is_external:
            self.feedback.failed(self, True)
            return
        # Internal failure: the on-disk state may be inconsistent, so delete
        # the fastresume file to force a full hash check on next start.
        if self.config['data_dir'] and self._storage is not None:
            filename = os.path.join(self.config['data_dir'], 'resume',
                                    self.infohash.encode('hex'))
            if os.path.exists(filename):
                try:
                    os.remove(filename)
                except Exception, e:
                    self._error(WARNING,
                                _("Could not remove fastresume file after "
                                  "failure:")
                                + str(e))
        self.feedback.failed(self, False)
|
||||
|
||||
    def _finished(self):
        """Mark the torrent complete: flush storage, flip to seed mode."""
        self.finflag.set()
        # Call self._storage.close() to flush buffers and change files to
        # read-only mode (when they're possibly reopened). Let exceptions
        # from self._storage.close() kill the torrent since files might not
        # be correct on disk if file.close() failed.
        self._storage.close()
        # If we haven't announced yet, normal first announce done later will
        # tell the tracker about seed status.
        self.is_seed = True
        if self._announced:
            self._rerequest.announce_finish()
        self._activity = (_("seeding"), 1)
        if self.config['check_hashes']:
            self._save_fastresume(True)
        self.feedback.finished(self)
|
||||
|
||||
    def _save_fastresume(self, on_finish=False):
        """Write fastresume state to data_dir/resume/<infohash-hex>.

        on_finish=True is the completion path: it bypasses the started/seed
        guards and records the full size without consulting _ratemeasure
        (which might not exist yet).  Failures are reported as warnings,
        never raised.
        """
        if not on_finish and (self.finflag.isSet() or not self.started):
            return
        if not self.config['data_dir']:
            return
        if on_finish:    # self._ratemeasure might not exist yet
            amount_done = self.total_bytes
        else:
            amount_done = self.total_bytes - self._ratemeasure.get_size_left()
        filename = os.path.join(self.config['data_dir'], 'resume',
                                self.infohash.encode('hex'))
        resumefile = None
        try:
            resumefile = file(filename, 'wb')
            self._storage.write_fastresume(resumefile, amount_done)
            self._storagewrapper.write_fastresume(resumefile)
            resumefile.close()
        except Exception, e:
            self._error(WARNING, _("Could not write fastresume data: ") + str(e))
            if resumefile is not None:
                # close() on an already-closed file object is a no-op here.
                resumefile.close()
|
||||
|
||||
    def shutdown(self):
        """Cleanly stop the torrent, persisting fastresume data.

        Idempotent: a second call returns immediately.  Any error during
        teardown is routed through got_exception() rather than raised.
        """
        if self.closed:
            return
        try:
            self._close()
            self._save_fastresume()
            self._activity = (_("shut down"), 0)
        except Exception, e:
            self.got_exception(e)
|
||||
|
||||
    def internal_shutdown(self, level, text):
        """Shut down in response to an unrecoverable announce failure.

        Clearing _announced first matters: it prevents _close() from trying
        to send a final 'stopped' announce to a tracker we just failed to
        reach.
        """
        # This is only called when announce fails with no peers,
        # don't try to announce again telling we're leaving the torrent
        self._announced = False
        self._error(level, text)
        self.shutdown()
        self.feedback.failed(self, True)
|
||||
|
||||
    def _close(self):
        """Tear down all per-torrent machinery (ordered; idempotent).

        Order matters: signal the hash-check thread via _doneflag before
        joining it, announce 'stopped' before dismantling the listener, and
        close connections before closing storage.
        """
        if self.closed:
            return
        self.closed = True
        self._rawserver.remove_context(self)
        self._doneflag.set()
        if self._announced:
            self._rerequest.announce_stop()
            self._rerequest.cleanup()
        if self._hashcheck_thread is not None:
            self._hashcheck_thread.join() # should die soon after doneflag set
        if self._myfiles is not None:
            self._filepool.remove_files(self._myfiles)
        if self._listening:
            self._singleport_listener.remove_torrent(self.infohash)
        for port in self.reserved_ports:
            self._singleport_listener.release_port(port)
        if self._encoder is not None:
            self._encoder.close_connections()
        if self._storage is not None:
            self._storage.close()
        self._ratelimiter.clean_closed()
        # Reclaim the (potentially large) object graph promptly.
        self._rawserver.add_task(gc.collect, 0)
|
||||
|
||||
def get_status(self, spew = False, fileinfo=False):
|
||||
if self.started and not self.closed:
|
||||
r = self._statuscollecter.get_statistics(spew, fileinfo)
|
||||
r['activity'] = self._activity[0]
|
||||
else:
|
||||
r = dict(zip(('activity', 'fractionDone'), self._activity))
|
||||
return r
|
||||
|
||||
def get_total_transfer(self):
|
||||
if self._upmeasure is None:
|
||||
return (0, 0)
|
||||
return (self._upmeasure.get_total(), self._downmeasure.get_total())
|
||||
|
||||
def set_option(self, option, value):
|
||||
if self.closed:
|
||||
return
|
||||
if self.config.has_key(option) and self.config[option] == value:
|
||||
return
|
||||
self.config[option] = value
|
||||
if option == 'max_upload_rate':
|
||||
# make sure counters get reset so new rate applies immediately
|
||||
self.rlgroup.set_rate(value)
|
||||
|
||||
    def change_port(self):
        """React to a listen-port configuration change.

        Three cases: a forwarded port is configured (release all reserved
        ports and use it), the shared listener moved to a new port (reserve
        and adopt it), or nothing changed (return).  Adopting a new port
        requires a fresh peer id and re-announcing with it.
        """
        if not self._listening:
            return
        r = self.config['forwarded_port']
        if r:
            # Forwarded port takes precedence; reserved ones are not needed.
            for port in self.reserved_ports:
                self._singleport_listener.release_port(port)
            del self.reserved_ports[:]
            if self.reported_port == r:
                return
        elif self._singleport_listener.port != self.reported_port:
            r = self._singleport_listener.get_port()
            self.reserved_ports.append(r)
        else:
            return
        self.reported_port = r
        myid = self._make_id()
        self._encoder.my_id = myid
        self._rerequest.change_port(myid, r)
|
||||
|
||||
def _announce_done(self):
|
||||
for port in self.reserved_ports[:-1]:
|
||||
self._singleport_listener.release_port(port)
|
||||
del self.reserved_ports[:-1]
|
||||
|
||||
    def _make_id(self):
        """Generate a 20-byte peer id.

        Layout: 'M' + dashed version (e.g. 'M4-0-1') padded with '-' to 8
        characters, followed by 12 hex characters derived from a SHA-1 of
        the current time and pid (6 digest bytes hex-encoded).
        """
        myid = 'M' + version.split()[0].replace('.', '-')
        myid = myid + ('-' * (8-len(myid)))+sha(repr(time())+ ' ' +
                                                str(getpid())).digest()[-6:].encode('hex')
        return myid
|
||||
|
||||
def _error(self, level, text, exception=False):
|
||||
self.errors.append((time(), level, text))
|
||||
if exception:
|
||||
self.feedback.exception(self, text)
|
||||
else:
|
||||
self.feedback.error(self, level, text)
|
260
BitTorrent/launchmanycore.py
Executable file
260
BitTorrent/launchmanycore.py
Executable file
@ -0,0 +1,260 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Original version written by John Hoffman, heavily modified for different
|
||||
# multitorrent architecture by Uoti Urpala (over 40% shorter than original)
|
||||
|
||||
import os
|
||||
from cStringIO import StringIO
|
||||
from traceback import print_exc
|
||||
|
||||
from BitTorrent import configfile
|
||||
from BitTorrent.parsedir import parsedir
|
||||
from BitTorrent.download import Multitorrent, Feedback
|
||||
from BitTorrent.ConvertedMetainfo import ConvertedMetainfo
|
||||
from BitTorrent import BTFailure
|
||||
|
||||
from threading import Event
|
||||
from time import time
|
||||
|
||||
|
||||
class LaunchMany(Feedback):
|
||||
|
||||
    def __init__(self, config, output, configfile_key):
        """Set up state, then run the event loop until shutdown.

        NOTE(review): this constructor blocks in listen_forever() until the
        process is told to stop, and performs shutdown afterwards — it is
        the whole program, not just initialization.
        """
        try:
            self.config = config
            self.output = output
            self.configfile_key = configfile_key

            # Caches maintained by parsedir() across scans.
            self.torrent_dir = config['torrent_dir']
            self.torrent_cache = {}
            self.file_cache = {}
            self.blocked_files = {}

            # downloads maps infohash -> _SingleTorrent (None until its
            # hash check has been started).
            self.torrent_list = []
            self.downloads = {}
            self.doneflag = Event()

            # Hash checks run one at a time; queued infohashes wait here.
            self.hashcheck_queue = []
            self.hashcheck_store = {}
            self.hashcheck_current = None

            self.multitorrent = Multitorrent(config, self.doneflag,
                                             self.global_error)
            self.rawserver = self.multitorrent.rawserver

            self.rawserver.add_task(self.scan, 0)
            self.rawserver.add_task(self.stats, 0)

            try:
                import signal
                def handler(signum, frame):
                    # SIGHUP triggers a config reload on the event loop.
                    self.rawserver.external_add_task(self.read_config, 0)
                signal.signal(signal.SIGHUP, handler)
                self.rawserver.install_sigint_handler()
            except Exception, e:
                # e.g. platforms without SIGHUP; keep running regardless.
                self.output.message(_("Could not set signal handler: ") + str(e))

            self.rawserver.listen_forever()

            # Event loop exited: shut every torrent down.
            self.output.message(_("shutting down"))
            for infohash in self.torrent_list:
                self.output.message(_('dropped "%s"') %
                                    self.torrent_cache[infohash]['path'])
                torrent = self.downloads[infohash]
                if torrent is not None:
                    torrent.shutdown()
        except:
            # Last-resort handler: report the traceback through the UI.
            data = StringIO()
            print_exc(file = data)
            output.exception(data.getvalue())
|
||||
|
||||
def scan(self):
|
||||
self.rawserver.add_task(self.scan, self.config['parse_dir_interval'])
|
||||
|
||||
r = parsedir(self.torrent_dir, self.torrent_cache,
|
||||
self.file_cache, self.blocked_files,
|
||||
self.output.message)
|
||||
|
||||
( self.torrent_cache, self.file_cache, self.blocked_files,
|
||||
added, removed ) = r
|
||||
|
||||
for infohash, data in removed.items():
|
||||
self.output.message(_('dropped "%s"') % data['path'])
|
||||
self.remove(infohash)
|
||||
for infohash, data in added.items():
|
||||
self.output.message(_('added "%s"' ) % data['path'])
|
||||
self.add(infohash, data)
|
||||
|
||||
    def stats(self):
        """Collect a display row per torrent and hand them to the UI.

        Each row is (name, status, progress, peers, seeds, seedsmsg, dist,
        uprate, dnrate, upamt, dnamt, size, t, msg).  If the UI's display()
        returns true, the whole process is asked to stop.
        """
        self.rawserver.add_task(self.stats, self.config['display_interval'])
        data = []
        for infohash in self.torrent_list:
            cache = self.torrent_cache[infohash]
            if self.config['display_path']:
                name = cache['path']
            else:
                name = cache['name']
            size = cache['length']
            d = self.downloads[infohash]
            # Defaults used while the torrent is queued or stopped.
            progress = '0.0%'
            peers = 0
            seeds = 0
            seedsmsg = "S"
            dist = 0.0
            uprate = 0.0
            dnrate = 0.0
            upamt = 0
            dnamt = 0
            t = 0
            msg = ''
            if d is None:
                status = _("waiting for hash check")
            else:
                stats = d.get_status()
                status = stats['activity']
                # One decimal place, truncated rather than rounded.
                progress = '%.1f%%' % (int(stats['fractionDone']*1000)/10.0)
                if d.started and not d.closed:
                    s = stats
                    dist = s['numCopies']
                    if d.is_seed:
                        seeds = 0 # s['numOldSeeds']
                        seedsmsg = "s"
                    else:
                        if s['numSeeds'] + s['numPeers']:
                            t = stats['timeEst']
                            if t is None:
                                t = -1
                            if t == 0:  # unlikely
                                t = 0.01
                            status = _("downloading")
                        else:
                            t = -1
                            status = _("connecting to peers")
                        seeds = s['numSeeds']
                        dnrate = stats['downRate']
                    peers = s['numPeers']
                    uprate = stats['upRate']
                    upamt = s['upTotal']
                    dnamt = s['downTotal']
                # Show the latest error for five minutes (or forever once
                # the torrent is closed).
                if d.errors and (d.closed or d.errors[-1][0] + 300 > time()):
                    msg = d.errors[-1][2]

            data.append(( name, status, progress, peers, seeds, seedsmsg, dist,
                          uprate, dnrate, upamt, dnamt, size, t, msg ))
        stop = self.output.display(data)
        if stop:
            self.doneflag.set()
|
||||
|
||||
def remove(self, infohash):
|
||||
self.torrent_list.remove(infohash)
|
||||
if self.downloads[infohash] is not None:
|
||||
self.downloads[infohash].shutdown()
|
||||
self.was_stopped(infohash)
|
||||
del self.downloads[infohash]
|
||||
|
||||
def add(self, infohash, data):
|
||||
self.torrent_list.append(infohash)
|
||||
self.downloads[infohash] = None
|
||||
self.hashcheck_queue.append(infohash)
|
||||
self.hashcheck_store[infohash] = data['metainfo']
|
||||
self.check_hashcheck_queue()
|
||||
|
||||
def check_hashcheck_queue(self):
|
||||
if self.hashcheck_current is not None or not self.hashcheck_queue:
|
||||
return
|
||||
self.hashcheck_current = self.hashcheck_queue.pop(0)
|
||||
metainfo = self.hashcheck_store[self.hashcheck_current]
|
||||
del self.hashcheck_store[self.hashcheck_current]
|
||||
filename = self.determine_filename(self.hashcheck_current)
|
||||
self.downloads[self.hashcheck_current] = self.multitorrent. \
|
||||
start_torrent(ConvertedMetainfo(metainfo),
|
||||
self.config, self, filename)
|
||||
|
||||
def determine_filename(self, infohash):
|
||||
x = self.torrent_cache[infohash]
|
||||
name = x['name']
|
||||
savein = self.config['save_in']
|
||||
isdir = not x['metainfo']['info'].has_key('length')
|
||||
style = self.config['saveas_style']
|
||||
if style == 4:
|
||||
torrentname = os.path.split(x['path'][:-8])[1]
|
||||
suggestedname = name
|
||||
if torrentname == suggestedname:
|
||||
style = 1
|
||||
else:
|
||||
style = 3
|
||||
|
||||
if style == 1 or style == 3:
|
||||
if savein:
|
||||
saveas = os.path.join(savein,x['file'][:-8]) # strip '.torrent'
|
||||
else:
|
||||
saveas = x['path'][:-8] # strip '.torrent'
|
||||
if style == 3 and not isdir:
|
||||
saveas = os.path.join(saveas, name)
|
||||
else:
|
||||
if savein:
|
||||
saveas = os.path.join(savein, name)
|
||||
else:
|
||||
saveas = os.path.join(os.path.split(x['path'])[0], name)
|
||||
return saveas
|
||||
|
||||
def was_stopped(self, infohash):
|
||||
try:
|
||||
self.hashcheck_queue.remove(infohash)
|
||||
except:
|
||||
pass
|
||||
else:
|
||||
del self.hashcheck_store[infohash]
|
||||
if self.hashcheck_current == infohash:
|
||||
self.hashcheck_current = None
|
||||
self.check_hashcheck_queue()
|
||||
|
||||
    def global_error(self, level, text):
        """Report a non-torrent-specific error; the level is not displayed."""
        self.output.message(text)
|
||||
|
||||
    def exchandler(self, s):
        """Forward a formatted traceback string to the UI."""
        self.output.exception(s)
|
||||
|
||||
    def read_config(self):
        """Reload the config file and push new values to all torrents.

        Triggered by SIGHUP (see __init__).  A parse failure leaves the
        current configuration untouched.
        """
        try:
            newvalues = configfile.get_config(self.config, self.configfile_key)
        except Exception, e:
            self.output.message(_("Error reading config: ") + str(e))
            return
        self.output.message(_("Rereading config file"))
        self.config.update(newvalues)
        # The set_option call can potentially trigger something that kills
        # the torrent (when writing this the only possibility is a change in
        # max_files_open causing an IOError while closing files), and so
        # the self.failed() callback can run during this loop.
        for option, value in newvalues.iteritems():
            self.multitorrent.set_option(option, value)
        for torrent in self.downloads.values():
            if torrent is not None:
                for option, value in newvalues.iteritems():
                    torrent.set_option(option, value)
|
||||
|
||||
# rest are callbacks from torrent instances
|
||||
|
||||
    def started(self, torrent):
        """Feedback callback: a torrent finished its hash check and started.

        Frees the single hash-check slot and launches the next queued check.
        """
        self.hashcheck_current = None
        self.check_hashcheck_queue()
|
||||
|
||||
def failed(self, torrent, is_external):
|
||||
infohash = torrent.infohash
|
||||
self.was_stopped(infohash)
|
||||
if self.torrent_cache.has_key(infohash):
|
||||
self.output.message('DIED: "'+self.torrent_cache[infohash]['path']+'"')
|
||||
|
||||
    def exception(self, torrent, text):
        """Feedback callback: route a torrent's traceback to the UI."""
        self.exchandler(text)
|
263
BitTorrent/makemetafile.py
Executable file
263
BitTorrent/makemetafile.py
Executable file
@ -0,0 +1,263 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen
|
||||
|
||||
from __future__ import division
|
||||
|
||||
import os
|
||||
import sys
|
||||
from sha import sha
|
||||
from time import time
|
||||
from threading import Event
|
||||
|
||||
from BitTorrent.bencode import bencode, bdecode
|
||||
from BitTorrent.btformats import check_info
|
||||
from BitTorrent.parseargs import parseargs, printHelp
|
||||
from BitTorrent.obsoletepythonsupport import *
|
||||
from BitTorrent import BTFailure
|
||||
|
||||
from khashmir.node import Node
|
||||
from khashmir.ktable import KTable
|
||||
from khashmir.util import packPeers, compact_peer_info
|
||||
|
||||
# File and directory names that are never included in generated torrents.
ignore = ['core', 'CVS', 'Thumbs.db', 'desktop.ini']

# Translation table mapping every Unicode "noncharacter" code point to None
# (so unicode.translate drops it): the surrogate range, U+FDD0-U+FDEF, and
# U+FFFE/U+FFFF.  Used by makeinfo() to reject invalid file names.
noncharacter_translate = {}
for _block in (range(0xD800, 0xE000),
               range(0xFDD0, 0xFDF0),
               (0xFFFE, 0xFFFF)):
    for i in _block:
        noncharacter_translate[i] = None
del i, _block

def dummy(v):
    # Default no-op callback for progress/file notifications.
    pass
|
||||
|
||||
def make_meta_files(url,
                    files,
                    flag=None,
                    progressfunc=dummy,
                    filefunc=dummy,
                    piece_len_pow2=None,
                    target=None,
                    comment=None,
                    filesystem_encoding=None,
                    use_tracker=True,
                    data_dir = None):
    """Generate a .torrent for each entry in `files` (skipping .torrents).

    url is the announce URL (or DHT node list when use_tracker is False).
    progressfunc receives the overall completion fraction; filefunc is
    called with each path before it is hashed; setting `flag` aborts.
    Raises BTFailure on bad arguments or unsupported encodings.
    """
    # Fresh Event per call.  The old default `flag=Event()` was a mutable
    # default argument shared by every caller that did not pass its own,
    # so setting it once aborted all future default-flag invocations.
    if flag is None:
        flag = Event()
    if len(files) > 1 and target:
        raise BTFailure(_("You can't specify the name of the .torrent file "
                          "when generating multiple torrents at once"))

    if not filesystem_encoding:
        try:
            getattr(sys, 'getfilesystemencoding')
        except AttributeError:
            pass
        else:
            filesystem_encoding = sys.getfilesystemencoding()
        if not filesystem_encoding:
            filesystem_encoding = 'ascii'
    try:
        # Probe that the codec exists before doing any work.
        'a1'.decode(filesystem_encoding)
    except (LookupError, TypeError):
        raise BTFailure(_('Filesystem encoding "%s" is not supported in this version')
                        % filesystem_encoding)
    files.sort()
    ext = '.torrent'

    togen = []
    for f in files:
        if not f.endswith(ext):
            togen.append(f)

    total = 0
    for f in togen:
        total += calcsize(f)

    subtotal = [0]
    def callback(x):
        subtotal[0] += x
        # Guard against division by zero when every input file is empty.
        if total > 0:
            progressfunc(subtotal[0] / total)
    for f in togen:
        if flag.isSet():
            break
        t = os.path.split(f)
        if t[1] == '':
            # Trailing slash: use the directory itself.
            f = t[0]
        filefunc(f)
        if use_tracker:
            make_meta_file(f, url, flag=flag, progress=callback,
                           piece_len_exp=piece_len_pow2, target=target,
                           comment=comment, encoding=filesystem_encoding)
        else:
            make_meta_file_dht(f, url, flag=flag, progress=callback,
                               piece_len_exp=piece_len_pow2, target=target,
                               comment=comment, encoding=filesystem_encoding, data_dir=data_dir)
|
||||
|
||||
|
||||
def make_meta_file(path, url, piece_len_exp, flag=None, progress=dummy,
                   comment=None, target=None, encoding='ascii'):
    """Write a tracker-based .torrent for `path`.

    The output file is `target` if given, otherwise `<path>.torrent`.
    Returns silently without writing anything if `flag` is set during
    hashing.
    """
    # Fresh Event per call instead of the old shared mutable default.
    if flag is None:
        flag = Event()
    data = {'announce': url.strip(),'creation date': int(time())}
    piece_length = 2 ** piece_len_exp
    a, b = os.path.split(path)
    if not target:
        if b == '':
            # path ended with a separator; name after the directory.
            f = a + '.torrent'
        else:
            f = os.path.join(a, b + '.torrent')
    else:
        f = target
    info = makeinfo(path, piece_length, flag, progress, encoding)
    if flag.isSet():
        return
    check_info(info)
    data['info'] = info
    if comment:
        data['comment'] = comment
    # open() instead of the Py3-removed file() builtin; try/finally so a
    # failed write cannot leak the handle.
    h = open(f, 'wb')
    try:
        h.write(bencode(data))
    finally:
        h.close()
|
||||
|
||||
def make_meta_file_dht(path, nodes, piece_len_exp, flag=Event(), progress=dummy,
                       comment=None, target=None, encoding='ascii', data_dir=None):
    # if nodes is empty, then get them out of the routing table in data_dir
    # else, expect nodes to be a string of comma seperated <ip>:<port> pairs
    # this has a lot of duplicated code from make_meta_file
    # NOTE(review): `flag=Event()` is a mutable default shared across calls;
    # NOTE(review): with empty `nodes` and data_dir=None the open() below
    # will fail — presumably callers always pass data_dir in that case.
    piece_length = 2 ** piece_len_exp
    a, b = os.path.split(path)
    if not target:
        if b == '':
            # path ended with a separator; name after the directory.
            f = a + '.torrent'
        else:
            f = os.path.join(a, b + '.torrent')
    else:
        f = target
    info = makeinfo(path, piece_length, flag, progress, encoding)
    if flag.isSet():
        return
    check_info(info)
    info_hash = sha(bencode(info)).digest()

    if not nodes:
        # Pull the closest known nodes out of the saved khashmir routing
        # table, excluding our own loopback placeholder entry.
        x = open(os.path.join(data_dir, 'routing_table'), 'rb')
        d = bdecode(x.read())
        x.close()
        t = KTable(Node().initWithDict({'id':d['id'], 'host':'127.0.0.1','port': 0}))
        for n in d['rt']:
            t.insertNode(Node().initWithDict(n))
        nodes = [(node.host, node.port) for node in t.findNodes(info_hash) if node.host != '127.0.0.1']
    else:
        # Parse "ip:port,ip:port,..." into (host, port) tuples.
        nodes = [(a[0], int(a[1])) for a in [node.strip().split(":") for node in nodes.split(",")]]
    data = {'nodes': nodes, 'creation date': int(time())}
    h = file(f, 'wb')

    data['info'] = info
    if comment:
        data['comment'] = comment
    h.write(bencode(data))
    h.close()
|
||||
|
||||
|
||||
def calcsize(path):
    """Return the total size in bytes of all files under path.

    For a plain file this is just its size; for a directory, the sum over
    every (non-ignored) file found by subfiles().
    """
    return sum([os.path.getsize(entry[1])
                for entry in subfiles(os.path.abspath(path))])
|
||||
|
||||
def makeinfo(path, piece_length, flag, progress, encoding):
|
||||
def to_utf8(name):
|
||||
try:
|
||||
u = name.decode(encoding)
|
||||
except Exception, e:
|
||||
raise BTFailure(_('Could not convert file/directory name "%s" to '
|
||||
'utf-8 (%s). Either the assumed filesystem '
|
||||
'encoding "%s" is wrong or the filename contains '
|
||||
'illegal bytes.') % (name, str(e), encoding))
|
||||
if u.translate(noncharacter_translate) != u:
|
||||
raise BTFailure(_('File/directory name "%s" contains reserved '
|
||||
'unicode values that do not correspond to '
|
||||
'characters.') % name)
|
||||
return u.encode('utf-8')
|
||||
path = os.path.abspath(path)
|
||||
if os.path.isdir(path):
|
||||
subs = subfiles(path)
|
||||
subs.sort()
|
||||
pieces = []
|
||||
sh = sha()
|
||||
done = 0
|
||||
fs = []
|
||||
totalsize = 0.0
|
||||
totalhashed = 0
|
||||
for p, f in subs:
|
||||
totalsize += os.path.getsize(f)
|
||||
|
||||
for p, f in subs:
|
||||
pos = 0
|
||||
size = os.path.getsize(f)
|
||||
p2 = [to_utf8(name) for name in p]
|
||||
fs.append({'length': size, 'path': p2})
|
||||
h = file(f, 'rb')
|
||||
while pos < size:
|
||||
a = min(size - pos, piece_length - done)
|
||||
sh.update(h.read(a))
|
||||
if flag.isSet():
|
||||
return
|
||||
done += a
|
||||
pos += a
|
||||
totalhashed += a
|
||||
|
||||
if done == piece_length:
|
||||
pieces.append(sh.digest())
|
||||
done = 0
|
||||
sh = sha()
|
||||
progress(a)
|
||||
h.close()
|
||||
if done > 0:
|
||||
pieces.append(sh.digest())
|
||||
return {'pieces': ''.join(pieces),
|
||||
'piece length': piece_length, 'files': fs,
|
||||
'name': to_utf8(os.path.split(path)[1])}
|
||||
else:
|
||||
size = os.path.getsize(path)
|
||||
pieces = []
|
||||
p = 0
|
||||
h = file(path, 'rb')
|
||||
while p < size:
|
||||
x = h.read(min(piece_length, size - p))
|
||||
if flag.isSet():
|
||||
return
|
||||
pieces.append(sha(x).digest())
|
||||
p += piece_length
|
||||
if p > size:
|
||||
p = size
|
||||
progress(min(piece_length, size - p))
|
||||
h.close()
|
||||
return {'pieces': ''.join(pieces),
|
||||
'piece length': piece_length, 'length': size,
|
||||
'name': to_utf8(os.path.split(path)[1])}
|
||||
|
||||
def subfiles(d):
    """List files under d as (path_components, full_path) pairs.

    A plain file yields a single ([], d) entry.  Directory entries listed
    in `ignore` or starting with '.' are skipped.  Order is unspecified
    (depth-first via an explicit stack).
    """
    result = []
    pending = [([], d)]
    while pending:
        components, full = pending.pop()
        if not os.path.isdir(full):
            result.append((components, full))
            continue
        for entry in os.listdir(full):
            # Skip junk files and hidden entries.
            if entry in ignore or entry.startswith('.'):
                continue
            pending.append((components + [entry], os.path.join(full, entry)))
    return result
|
33
BitTorrent/obsoletepythonsupport.py
Executable file
33
BitTorrent/obsoletepythonsupport.py
Executable file
@ -0,0 +1,33 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Backports for Python < 2.3.  Importing this module with
# `from BitTorrent.obsoletepythonsupport import *` gives old interpreters
# int()-as-long plus the enumerate() and sum() builtins added in 2.3; on
# newer interpreters the guard is false and nothing is exported.
from __future__ import generators

import sys

if sys.version_info < (2, 3):
    # Allow int() to create numbers larger than "small ints".
    # This is NOT SAFE if int is used as the name of the type instead
    # (as in "type(x) in (int, long)").
    int = long

    def enumerate(x):
        # Minimal stand-in for the 2.3 builtin: yields (index, item).
        i = 0
        for y in x:
            yield (i, y)
            i += 1

    def sum(seq):
        # Minimal stand-in for the 2.3 builtin (no `start` argument).
        r = 0
        for x in seq:
            r += x
        return r

# Keep `sys` out of the star-exported namespace.
del sys
|
187
BitTorrent/parseargs.py
Executable file
187
BitTorrent/parseargs.py
Executable file
@ -0,0 +1,187 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bill Bumgarner and Bram Cohen
|
||||
|
||||
from types import *
|
||||
from cStringIO import StringIO
|
||||
|
||||
from BitTorrent.obsoletepythonsupport import *
|
||||
|
||||
from BitTorrent.defaultargs import MyBool, MYTRUE
|
||||
from BitTorrent import BTFailure
|
||||
from BitTorrent.bencode import bdecode
|
||||
from BitTorrent.platform import is_frozen_exe
|
||||
from BitTorrent.RawServer_magic import switch_rawserver
|
||||
|
||||
def makeHelp(uiname, defaults):
    """Build the full usage/help text for the given front-end name.

    The usage line is tailored per UI (launchmany, bittorrent,
    maketorrent, ...); the option table comes from formatDefinitions.
    """
    parts = [_("Usage: %s ") % uiname]
    # NOTE: the exact-match 'bittorrent' test must precede the
    # startswith('bittorrent') test to keep its distinct usage line.
    if uiname.startswith('launchmany'):
        parts.append(_("[OPTIONS] [TORRENTDIRECTORY]\n\n"))
        parts.append(_("If a non-option argument is present it's taken as the value\n"
                       "of the torrent_dir option.\n"))
    elif uiname == 'bittorrent':
        parts.append(_("[OPTIONS] [TORRENTFILES]\n"))
    elif uiname.startswith('bittorrent'):
        parts.append(_("[OPTIONS] [TORRENTFILE]\n"))
    elif uiname.startswith('maketorrent'):
        parts.append(_("[OPTION] TRACKER_URL FILE [FILE]\n"))
    parts.append('\n')
    parts.append(_("arguments are -\n") + formatDefinitions(defaults, 80))
    return ''.join(parts)
|
||||
|
||||
def printHelp(uiname, defaults):
|
||||
if uiname in ('bittorrent','maketorrent') and is_frozen_exe:
|
||||
from BitTorrent.GUI import HelpWindow
|
||||
HelpWindow(None, makeHelp(uiname, defaults))
|
||||
else:
|
||||
print makeHelp(uiname, defaults)
|
||||
|
||||
def formatDefinitions(options, COLS):
    """Render the option table as word-wrapped help text.

    options -- iterable of (longname, default, doc) triples; entries with
               an empty doc string are hidden from the help output.
    COLS    -- terminal width to wrap the doc text to.
    Returns the formatted text as a single string.
    """
    s = StringIO()
    indent = " " * 10
    width = COLS - 11

    # very narrow terminals get a smaller indent so something still fits
    if width < 15:
        width = COLS - 2
        indent = " "

    for option in options:
        (longname, default, doc) = option
        if doc == '':
            continue
        s.write('--' + longname)
        is_boolean = type(default) is MyBool
        if is_boolean:
            # boolean switches also advertise their negated form
            s.write(', --no_' + longname)
        else:
            s.write(' <arg>')
        s.write('\n')
        if default is not None:
            doc += _(" (defaults to ") + repr(default) + ')'
        # greedy word-wrap: i tracks the current line length
        i = 0
        for word in doc.split():
            if i == 0:
                s.write(indent + word)
                i = len(word)
            elif i + len(word) >= width:
                s.write('\n' + indent + word)
                i = len(word)
            else:
                s.write(' ' + word)
                i += len(word) + 1
        s.write('\n\n')
    return s.getvalue()
|
||||
|
||||
def usage(str):
    # Abort command-line parsing with a user-facing error message.
    # NOTE(review): the parameter name shadows the builtin ``str``.
    raise BTFailure(str)
|
||||
|
||||
def format_key(key):
    """Return the command-line spelling of an option name: -x for
    single-letter keys, --longname otherwise."""
    if len(key) == 1:
        return '-' + key
    return '--' + key
|
||||
|
||||
def parseargs(argv, options, minargs=None, maxargs=None, presets=None):
    """Parse argv against the option definitions.

    argv    -- raw argument list (no program name).
    options -- iterable of (longname, default, doc) triples.
    minargs/maxargs -- bounds on the number of non-option arguments.
    presets -- optional dict of pre-supplied values; copied, not mutated.
    Returns (config, args) where config maps every option to its final
    value and args holds the non-option arguments.
    Raises BTFailure (directly or via usage()) on any parse error.
    """
    config = {}
    for option in options:
        longname, default, doc = option
        config[longname] = default
    args = []
    pos = 0
    if presets is None:
        presets = {}
    else:
        # work on a copy: command-line values are merged into it below
        presets = presets.copy()
    while pos < len(argv):
        if argv[pos][:1] != '-':   # not a cmdline option
            args.append(argv[pos])
            pos += 1
        else:
            key, value = None, None
            if argv[pos].startswith('--'):        # --aaa 1
                if argv[pos].startswith('--no_'):
                    # negated boolean switch
                    key = argv[pos][5:]
                    boolval = False
                else:
                    key = argv[pos][2:]
                    boolval = True
                if key not in config:
                    raise BTFailure(_("unknown key ") + format_key(key))
                if type(config[key]) is MyBool:  # boolean cmd line switch, no value
                    value = boolval
                    pos += 1
                else: # --argument value
                    if pos == len(argv) - 1:
                        usage(_("parameter passed in at end with no value"))
                    key, value = argv[pos][2:], argv[pos+1]
                    pos += 2
            elif argv[pos][:1] == '-':
                # short option; value may be glued on or the next token
                key = argv[pos][1:2]
                if len(argv[pos]) > 2:   # -a1
                    value = argv[pos][2:]
                    pos += 1
                else:                    # -a 1
                    if pos == len(argv) - 1:
                        usage(_("parameter passed in at end with no value"))
                    value = argv[pos+1]
                    pos += 2
            else:
                raise BTFailure(_("command line parsing failed at ")+argv[pos])

            presets[key] = value
    # type-check/convert the collected strings, then overlay onto defaults
    parse_options(config, presets)
    config.update(presets)
    for key, value in config.items():
        if value is None:
            usage(_("Option %s is required.") % format_key(key))
    if minargs is not None and len(args) < minargs:
        usage(_("Must supply at least %d arguments.") % minargs)
    if maxargs is not None and len(args) > maxargs:
        usage(_("Too many arguments - %d maximum.") % maxargs)

    # side effect: select the twisted/untwisted RawServer implementation
    if config.has_key('twisted'):
        if config['twisted'] == 0:
            switch_rawserver('untwisted')
        elif config['twisted'] == 1:
            switch_rawserver('twisted')

    return (config, args)
|
||||
|
||||
def parse_options(defaults, newvalues):
|
||||
for key, value in newvalues.iteritems():
|
||||
if not defaults.has_key(key):
|
||||
raise BTFailure(_("unknown key ") + format_key(key))
|
||||
try:
|
||||
t = type(defaults[key])
|
||||
if t is MyBool:
|
||||
if value in ('True', '1', MYTRUE, True):
|
||||
value = True
|
||||
else:
|
||||
value = False
|
||||
newvalues[key] = value
|
||||
elif t in (StringType, NoneType):
|
||||
newvalues[key] = value
|
||||
elif t in (IntType, LongType):
|
||||
if value == 'False':
|
||||
newvalues[key] == 0
|
||||
elif value == 'True':
|
||||
newvalues[key] == 1
|
||||
else:
|
||||
newvalues[key] = int(value)
|
||||
elif t is FloatType:
|
||||
newvalues[key] = float(value)
|
||||
else:
|
||||
raise TypeError, str(t)
|
||||
|
||||
except ValueError, e:
|
||||
raise BTFailure(_("wrong format of %s - %s") % (format_key(key), str(e)))
|
||||
|
150
BitTorrent/parsedir.py
Executable file
150
BitTorrent/parsedir.py
Executable file
@ -0,0 +1,150 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by John Hoffman and Uoti Urpala
|
||||
|
||||
import os
|
||||
from sha import sha
|
||||
|
||||
from BitTorrent.bencode import bencode, bdecode
|
||||
from BitTorrent.btformats import check_message
|
||||
|
||||
NOISY = False
|
||||
|
||||
def parsedir(directory, parsed, files, blocked, errfunc,
             include_metainfo=True):
    """Rescan a torrent directory tree and diff it against the last scan.

    directory -- root directory to scan (subdirectories are descended only
                 when a directory contains no .torrent files itself).
    parsed    -- previous {info_hash: torrent_info_dict}.
    files     -- previous {path: [(mtime, size), info_hash_or_0]}.
    blocked   -- previous set/dict of paths blocked as duplicates/broken.
    errfunc   -- callable taking a message string, used for logging.
    include_metainfo -- if true, keep the full decoded metainfo in each
                 torrent_info_dict under 'metainfo'.
    Returns (new_parsed, new_files, new_blocked, added, removed).
    """
    if NOISY:
        errfunc('checking dir')
    dirs_to_check = [directory]
    new_files = {}
    new_blocked = {}
    while dirs_to_check: # first, recurse directories and gather torrents
        directory = dirs_to_check.pop()
        newtorrents = False
        try:
            dir_contents = os.listdir(directory)
        except (IOError, OSError), e:
            errfunc(_("Could not read directory ") + directory)
            continue
        for f in dir_contents:
            if f.endswith('.torrent'):
                newtorrents = True
                p = os.path.join(directory, f)
                try:
                    # hash slot starts at 0 = "not successfully parsed yet"
                    new_files[p] = [(os.path.getmtime(p),os.path.getsize(p)),0]
                except (IOError, OSError), e:
                    errfunc(_("Could not stat ") + p + " : " + str(e))
        if not newtorrents:
            # only descend into subdirectories of torrent-free directories
            for f in dir_contents:
                p = os.path.join(directory, f)
                if os.path.isdir(p):
                    dirs_to_check.append(p)

    new_parsed = {}
    to_add = []
    added = {}
    removed = {}
    # files[path] = [(modification_time, size), hash], hash is 0 if the file
    # has not been successfully parsed
    for p,v in new_files.items():   # re-add old items and check for changes
        oldval = files.get(p)
        if oldval is None:     # new file
            to_add.append(p)
            continue
        h = oldval[1]
        if oldval[0] == v[0]:   # file is unchanged from last parse
            if h:
                if p in blocked:     # parseable + blocked means duplicate
                    to_add.append(p) # other duplicate may have gone away
                else:
                    new_parsed[h] = parsed[h]
                    new_files[p] = oldval
            else:
                new_blocked[p] = None  # same broken unparseable file
            continue
        if p not in blocked and h in parsed:  # modified; remove+add
            if NOISY:
                errfunc(_("removing %s (will re-add)") % p)
            removed[h] = parsed[h]
        to_add.append(p)

    to_add.sort()
    for p in to_add:     # then, parse new and changed torrents
        new_file = new_files[p]
        v = new_file[0]
        if new_file[1] in new_parsed:  # duplicate
            # warn only once per (path, mtime/size) combination
            if p not in blocked or files[p][0] != v:
                errfunc(_("**warning** %s is a duplicate torrent for %s") %
                        (p, new_parsed[new_file[1]]['path']))
            new_blocked[p] = None
            continue

        if NOISY:
            errfunc('adding '+p)
        try:
            ff = open(p, 'rb')
            d = bdecode(ff.read())
            check_message(d)
            # info hash identifies the torrent regardless of file path
            h = sha(bencode(d['info'])).digest()
            new_file[1] = h
            if new_parsed.has_key(h):
                errfunc(_("**warning** %s is a duplicate torrent for %s") %
                        (p, new_parsed[h]['path']))
                new_blocked[p] = None
                continue

            # build the summary record for this torrent
            a = {}
            a['path'] = p
            f = os.path.basename(p)
            a['file'] = f
            i = d['info']
            l = 0
            nf = 0
            if i.has_key('length'):
                # single-file torrent
                l = i.get('length',0)
                nf = 1
            elif i.has_key('files'):
                # multi-file torrent: total the per-file lengths
                for li in i['files']:
                    nf += 1
                    if li.has_key('length'):
                        l += li['length']
            a['numfiles'] = nf
            a['length'] = l
            a['name'] = i.get('name', f)
            def setkey(k, d = d, a = a):
                # copy optional top-level keys through if present
                if d.has_key(k):
                    a[k] = d[k]
            setkey('failure reason')
            setkey('warning message')
            setkey('announce-list')
            if include_metainfo:
                a['metainfo'] = d
        except:
            # any decode/validation failure blocks the file (best effort)
            errfunc(_("**warning** %s has errors") % p)
            new_blocked[p] = None
            continue
        try:
            ff.close()
        except:
            pass
        if NOISY:
            errfunc(_("... successful"))
        new_parsed[h] = a
        added[h] = a

    for p,v in files.iteritems():  # and finally, mark removed torrents
        if p not in new_files and p not in blocked:
            if NOISY:
                errfunc(_("removing %s") % p)
            removed[v[1]] = parsed[v[1]]

    if NOISY:
        errfunc(_("done checking"))
    return (new_parsed, new_files, new_blocked, added, removed)
|
290
BitTorrent/platform.py
Executable file
290
BitTorrent/platform.py
Executable file
@ -0,0 +1,290 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Matt Chisholm and Uoti Urpala
|
||||
|
||||
# This module is strictly for cross platform compatibility items and
|
||||
# should not import anything from other BitTorrent modules.
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import gettext
|
||||
import locale
|
||||
if os.name == 'nt':
|
||||
import win32api
|
||||
from win32com.shell import shellcon, shell
|
||||
elif os.name == 'posix' and os.uname()[0] == 'Darwin':
|
||||
has_pyobjc = False
|
||||
try:
|
||||
from Foundation import NSBundle
|
||||
has_pyobjc = True
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from BitTorrent import app_name, version
|
||||
|
||||
if sys.platform.startswith('win'):
|
||||
bttime = time.clock
|
||||
else:
|
||||
bttime = time.time
|
||||
|
||||
is_frozen_exe = (os.name == 'nt') and hasattr(sys, 'frozen') and (sys.frozen == 'windows_exe')
|
||||
|
||||
os_name = os.name
|
||||
os_version = None
|
||||
if os_name == 'nt':
|
||||
wh = {(1, 4, 0): "95",
|
||||
(1, 4, 10): "98",
|
||||
(1, 4, 90): "ME",
|
||||
(2, 4, 0): "NT",
|
||||
(2, 5, 0): "2000",
|
||||
(2, 5, 1): "XP" ,
|
||||
(2, 5, 2): "2003",
|
||||
}
|
||||
wv = sys.getwindowsversion()
|
||||
wk = (wv[3], wv[0], wv[1])
|
||||
if wh.has_key(wk):
|
||||
os_version = wh[wk]
|
||||
del wh, wv, wk
|
||||
elif os_name == 'posix':
|
||||
os_version = os.uname()[0]
|
||||
|
||||
user_agent = "M" + version.replace('.', '-') + "--(%s/%s)" % (os_name, os_version)
|
||||
|
||||
def calc_unix_dirs():
    """Return the (pixmap, doc, locale) directories used for a *nix
    install, as paths relative to the install prefix."""
    versioned = '%s-%s' % (app_name, version)
    image_path = os.path.join('share', 'pixmaps', versioned)
    doc_path = os.path.join('share', 'doc', versioned)
    locale_path = os.path.join('share', 'locale')
    return image_path, doc_path, locale_path
|
||||
|
||||
app_root = os.path.split(os.path.abspath(sys.argv[0]))[0]
|
||||
doc_root = app_root
|
||||
osx = False
|
||||
if os.name == 'posix':
|
||||
if os.uname()[0] == "Darwin":
|
||||
doc_root = app_root = app_root.encode('utf8')
|
||||
if has_pyobjc:
|
||||
doc_root = NSBundle.mainBundle().resourcePath()
|
||||
osx = True
|
||||
image_root = os.path.join(app_root, 'images')
|
||||
locale_root = os.path.join(app_root, 'locale')
|
||||
|
||||
if not os.access(image_root, os.F_OK) or not os.access(locale_root, os.F_OK):
|
||||
# we guess that probably we are installed on *nix in this case
|
||||
# (I have no idea whether this is right or not -- matt)
|
||||
if app_root[-4:] == '/bin':
|
||||
# yep, installed on *nix
|
||||
installed_prefix = app_root[:-4]
|
||||
image_root, doc_root, locale_root = map(
|
||||
lambda p: os.path.join(installed_prefix, p), calc_unix_dirs()
|
||||
)
|
||||
|
||||
# a cross-platform way to get user's config directory
|
||||
# a cross-platform way to get user's config directory
def get_config_dir():
    """Return a writable per-user config directory, or None.

    Tries the APPDATA/HOME/USERPROFILE environment variables first; on
    Windows falls back to the shell's APPDATA folder, then to the
    executable's own directory if it is read/write accessible.
    """
    shellvars = ['${APPDATA}', '${HOME}', '${USERPROFILE}']
    dir_root = get_dir_root(shellvars)

    if (dir_root is None) and (os.name == 'nt'):
        app_dir = get_shell_dir(shellcon.CSIDL_APPDATA)
        if app_dir is not None:
            dir_root = app_dir

    # last resort on Windows: the directory the interpreter/exe lives in
    if dir_root is None and os.name == 'nt':
        tmp_dir_root = os.path.split(sys.executable)[0]
        if os.access(tmp_dir_root, os.R_OK|os.W_OK):
            dir_root = tmp_dir_root

    return dir_root
|
||||
|
||||
def get_cache_dir():
    """Return the Windows Internet-cache folder; None on other platforms."""
    if os.name == 'nt':
        return get_shell_dir(shellcon.CSIDL_INTERNET_CACHE)
    return None
|
||||
|
||||
def get_home_dir():
    """Return the user's home directory, or None.

    Tries HOME/USERPROFILE environment variables; on Windows falls back
    to the shell profile folder, then the desktop folder.
    """
    shellvars = ['${HOME}', '${USERPROFILE}']
    dir_root = get_dir_root(shellvars)

    if (dir_root is None) and (os.name == 'nt'):
        dir = get_shell_dir(shellcon.CSIDL_PROFILE)
        if dir is None:
            # there's no clear best fallback here
            # MS discourages you from writing directly in the home dir,
            # and sometimes (i.e. win98) there isn't one
            dir = get_shell_dir(shellcon.CSIDL_DESKTOPDIRECTORY)

        dir_root = dir

    return dir_root
|
||||
|
||||
def get_temp_dir():
    """Return a usable temp directory, or None.

    Order: TMP/TEMP environment variables, then the Win32 GetTempPath API
    (preferred on Windows), then a hard-coded platform default if it
    exists and is read/write accessible.
    """
    shellvars = ['${TMP}', '${TEMP}']
    dir_root = get_dir_root(shellvars, default_to_home=False)

    #this method is preferred to the envvars
    if os.name == 'nt':
        try_dir_root = win32api.GetTempPath()
        if try_dir_root is not None:
            dir_root = try_dir_root

    if dir_root is None:
        try_dir_root = None
        if os.name == 'nt':
            # this should basically never happen. GetTempPath always returns something
            try_dir_root = r'C:\WINDOWS\Temp'
        elif os.name == 'posix':
            try_dir_root = '/tmp'
        # only accept the fallback if it actually exists and is usable
        if (try_dir_root is not None and
            os.path.isdir(try_dir_root) and
            os.access(try_dir_root, os.R_OK|os.W_OK)):
            dir_root = try_dir_root
    return dir_root
|
||||
|
||||
def get_dir_root(shellvars, default_to_home=True):
    """Return the first existing directory named by the given shell
    variables (e.g. '${HOME}'), expanding each via the environment.

    When none resolve and default_to_home is true, fall back to
    os.path.expanduser('~') provided it expanded and exists; otherwise
    return None.
    """
    dir_root = None
    for var in shellvars:
        expanded = os.path.expandvars(var)
        # expandvars leaves unset variables untouched, so a change means
        # the variable was actually set
        if expanded != var and os.path.isdir(expanded):
            dir_root = expanded
            break
    if dir_root is None and default_to_home:
        dir_root = os.path.expanduser('~')
        if dir_root == '~' or not os.path.isdir(dir_root):
            dir_root = None
    return dir_root
|
||||
|
||||
# this function is the preferred way to get windows' paths
|
||||
# this function is the preferred way to get windows' paths
def get_shell_dir(value):
    """Look up a Windows shell folder by CSIDL constant.

    Returns None on non-Windows platforms, and best-effort on Windows:
    any failure is swallowed.
    """
    folder = None
    if os.name == 'nt':
        try:
            folder = shell.SHGetFolderPath(0, value, 0, 0)
            folder = folder.encode('mbcs')
        except:
            # bare except kept from the original best-effort lookup; note
            # that a failed encode leaves the unencoded path in place
            pass
    return folder
|
||||
|
||||
if os.name == 'nt':
    def path_wrap(path):
        """Convert a Windows mbcs-encoded path to UTF-8 bytes."""
        return path.decode('mbcs').encode('utf-8')
else:
    def path_wrap(path):
        """Identity on non-Windows platforms."""
        return path
|
||||
|
||||
def spawn(torrentqueue, cmd, *args):
    """Launch the helper program `cmd` from app_root in a new process.

    Tries cmd+ext (.exe when frozen), falling back to cmd.py.  On Windows
    the child is started via ShellExecute so OS handles (such as the
    controlsocket) are not inherited; on POSIX it fork/execs, closing the
    control socket in the child first.
    """
    ext = ''
    if is_frozen_exe:
        ext = '.exe'
    path = os.path.join(app_root, cmd+ext)
    if not os.access(path, os.F_OK):
        if os.access(path+'.py', os.F_OK):
            path = path+'.py'
    args = [path] + list(args)  # $0
    if os.name == 'nt':
        # do proper argument quoting since exec/spawn on Windows doesn't
        # BUG FIX: escape embedded double quotes with a backslash; the
        # previous replace('"', '\"') was a no-op because '\"' is the
        # same string literal as '"'.
        args = ['"%s"' % a.replace('"', '\\"') for a in args]
        argstr = ' '.join(args[1:])
        # use ShellExecute instead of spawn*() because we don't want
        # handles (like the controlsocket) to be duplicated
        win32api.ShellExecute(0, "open", args[0], argstr, None, 1)  # 1 == SW_SHOW
    else:
        if os.access(path, os.X_OK):
            forkback = os.fork()
            if forkback == 0:
                if torrentqueue is not None:
                    # BUG: should we do this?
                    # torrentqueue.set_done()
                    torrentqueue.wrapped.controlsocket.close_socket()
                os.execl(path, *args)
        else:
            # BUG: what should we do here?
            pass
|
||||
|
||||
|
||||
def _gettext_install(domain, localedir=None, languages=None, unicode=False):
    """Install the gettext `_` function for the given domain, with a
    Windows-specific language-detection workaround.

    Falls back to a NullTranslations object when no catalog is found.
    """
    # gettext on win32 does not use locale.getdefaultlocale() by default
    # other os's will fall through and gettext.find() will do this task
    if os_name == 'nt':
        # this code is straight out of gettext.find()
        if languages is None:
            languages = []
            for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
                val = os.environ.get(envar)
                if val:
                    languages = val.split(':')
                    break

            # this is the important addition - since win32 does not typically
            # have any enironment variable set, append the default locale before 'C'
            languages.append(locale.getdefaultlocale()[0])

            if 'C' not in languages:
                languages.append('C')

    # this code is straight out of gettext.install
    t = gettext.translation(domain, localedir, languages=languages, fallback=True)
    t.install(unicode)
|
||||
|
||||
|
||||
def language_path():
    """Path of the file that stores the user's chosen UI language code."""
    return os.path.join(get_config_dir(), '.bittorrent', 'data', 'language')
|
||||
|
||||
|
||||
def read_language_file():
    """Read the saved UI language code, or return None.

    Takes at most the first five characters of the file's first line,
    stopping at the first character that is neither alphabetic nor '_'
    (so e.g. 'en_US\n' yields 'en_US').
    """
    lang_file_name = language_path()
    lang = None
    if os.access(lang_file_name, os.F_OK|os.R_OK):
        mode = 'r'
        if sys.version_info >= (2, 3):
            # universal-newline mode only exists on 2.3+
            mode = 'U'
        lang_file = open(lang_file_name, mode)
        lang_line = lang_file.readline()
        lang_file.close()
        if lang_line:
            lang = ''
            for i in lang_line[:5]:
                if not i.isalpha() and i != '_':
                    break
                lang += i
            if lang == '':
                lang = None
    return lang
|
||||
|
||||
|
||||
def write_language_file(lang):
    """Persist the chosen UI language code, overwriting any previous one.

    Propagates IOError/OSError from opening or writing the file.
    """
    lang_file = open(language_path(), 'w')
    # close the file even if the write fails (the original leaked the
    # handle on error)
    try:
        lang_file.write(lang)
    finally:
        lang_file.close()
|
||||
|
||||
|
||||
def install_translation():
    """Install gettext translations, preferring the user's saved language
    file over environment detection."""
    languages = None
    try:
        lang = read_language_file()
        if lang is not None:
            languages = [lang,]
    except:
        # NOTE(review): the commented-out `pass` suggests this traceback
        # dump is leftover debugging; failures fall through to the
        # default language detection either way.
        #pass
        from traceback import print_exc
        print_exc()
    _gettext_install('bittorrent', locale_root, languages=languages)
|
88
BitTorrent/prefs.py
Executable file
88
BitTorrent/prefs.py
Executable file
@ -0,0 +1,88 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
|
||||
class Preferences(object):
    """Layered option mapping.

    Lookups fall back to an optional parent Preferences when a key is
    missing locally; writes and deletes touch only the local layer.
    Missing keys yield None from __getitem__ rather than raising.
    """

    def __init__(self, parent=None):
        # parent: another Preferences consulted for keys not set locally
        self._parent = None
        self._options = {}
        if parent:
            self._parent = parent

    def initWithDict(self, dict):
        """Replace the local option dict; returns self for chaining."""
        self._options = dict
        return self

    def getDict(self):
        """Shallow copy of only the local (non-inherited) options."""
        return dict(self._options)

    def getDifference(self):
        """Local options whose values differ from the parent's (all local
        options when there is no parent)."""
        if self._parent:
            return dict([(x, y) for x, y in self._options.items()
                         if y != self._parent.get(x, None)])
        else:
            return dict(self._options)

    def __getitem__(self, option):
        # local value wins; otherwise defer to parent; never raises KeyError
        if option in self._options:
            return self._options[option]
        elif self._parent:
            return self._parent[option]
        return None

    def __setitem__(self, option, value):
        self._options.__setitem__(option, value)

    def __len__(self):
        # NOTE: keys present in both layers are counted twice, so
        # len(p) may exceed len(p.keys()); preserved for compatibility.
        l = len(self._options)
        if self._parent:
            return l + len(self._parent)
        else:
            return l

    def __delitem__(self, option):
        # removes only the local entry; an inherited value may reappear
        del self._options[option]

    def clear(self):
        # clears only the local layer
        self._options.clear()

    def has_key(self, option):
        if option in self._options:
            return True
        elif self._parent:
            return self._parent.has_key(option)
        return False

    def keys(self):
        """Local keys first, then parent keys not shadowed locally."""
        l = list(self._options.keys())
        if self._parent:
            l += [key for key in self._parent.keys() if key not in l]
        return l

    def values(self):
        """Local values, then parent values not already present."""
        l = list(self._options.values())
        if self._parent:
            l += [value for value in self._parent.values() if value not in l]
        return l

    def items(self):
        """Local items, then parent items not already present."""
        l = list(self._options.items())
        if self._parent:
            l += [item for item in self._parent.items() if item not in l]
        return l

    def __iter__(self): return self.iterkeys()
    def iteritems(self): return self.items().__iter__()
    def iterkeys(self): return self.keys().__iter__()
    def itervalues(self): return self.values().__iter__()
    def update(self, dict): return self._options.update(dict)

    def get(self, key, failobj=None):
        """Like __getitem__ but with an explicit fallback for missing keys."""
        if not self.has_key(key):
            return failobj
        return self[key]
|
68
BitTorrent/selectpoll.py
Executable file
68
BitTorrent/selectpoll.py
Executable file
@ -0,0 +1,68 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen
|
||||
|
||||
from select import select, error
|
||||
from time import sleep
|
||||
from types import IntType
|
||||
from bisect import bisect
|
||||
POLLIN = 1
|
||||
POLLOUT = 2
|
||||
POLLERR = 8
|
||||
POLLHUP = 16
|
||||
|
||||
|
||||
class poll(object):
    """select()-based emulation of the poll() object API, for platforms
    without select.poll.  File descriptors are kept in two sorted lists
    maintained by the module-level insert()/remove() helpers."""

    def __init__(self):
        # sorted lists of fds watched for readability / writability
        self.rlist = []
        self.wlist = []

    def register(self, f, t):
        # accept either a raw fd or an object with fileno()
        if type(f) != IntType:
            f = f.fileno()
        # registering replaces any previous event mask for this fd
        if (t & POLLIN) != 0:
            insert(self.rlist, f)
        else:
            remove(self.rlist, f)
        if (t & POLLOUT) != 0:
            insert(self.wlist, f)
        else:
            remove(self.wlist, f)

    def unregister(self, f):
        if type(f) != IntType:
            f = f.fileno()
        remove(self.rlist, f)
        remove(self.wlist, f)

    def poll(self, timeout = None):
        """Wait up to timeout (seconds, like select) and return a list of
        (fd, event) pairs.  POLLERR/POLLHUP are never reported."""
        if self.rlist != [] or self.wlist != []:
            r, w, e = select(self.rlist, self.wlist, [], timeout)
        else:
            # nothing registered: just sleep out the timeout
            # NOTE(review): sleep(None) raises if timeout is None — callers
            # presumably always pass a numeric timeout; verify
            sleep(timeout)
            return []
        result = []
        for s in r:
            result.append((s, POLLIN))
        for s in w:
            result.append((s, POLLOUT))
        return result
|
||||
|
||||
def remove(list, item):
    """Delete item from the sorted list if present (no-op otherwise)."""
    pos = bisect(list, item)
    if pos and list[pos - 1] == item:
        del list[pos - 1]
|
||||
|
||||
def insert(list, item):
    """Insert item into the sorted list, keeping it sorted and skipping
    duplicates."""
    pos = bisect(list, item)
    already_present = pos > 0 and list[pos - 1] == item
    if not already_present:
        list.insert(pos, item)
|
874
BitTorrent/track.py
Executable file
874
BitTorrent/track.py
Executable file
@ -0,0 +1,874 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Bram Cohen and John Hoffman
|
||||
|
||||
import sys
|
||||
import os
|
||||
import signal
|
||||
import re
|
||||
from threading import Event
|
||||
from urlparse import urlparse
|
||||
from traceback import print_exc
|
||||
from time import time, gmtime, strftime, localtime
|
||||
from random import shuffle
|
||||
from types import StringType, IntType, LongType, ListType, DictType
|
||||
from binascii import b2a_hex
|
||||
from cStringIO import StringIO
|
||||
|
||||
from BitTorrent.obsoletepythonsupport import *
|
||||
|
||||
from BitTorrent.parseargs import parseargs, formatDefinitions
|
||||
from BitTorrent.RawServer_magic import RawServer
|
||||
from BitTorrent.HTTPHandler import HTTPHandler, months, weekdays
|
||||
from BitTorrent.parsedir import parsedir
|
||||
from BitTorrent.NatCheck import NatCheck
|
||||
from BitTorrent.bencode import bencode, bdecode, Bencached
|
||||
from BitTorrent.zurllib import quote, unquote
|
||||
from BitTorrent import version
|
||||
|
||||
|
||||
defaults = [
|
||||
('port', 80,
|
||||
_("Port to listen on.")),
|
||||
('dfile', None,
|
||||
_("file to store recent downloader info in")),
|
||||
('bind', '',
|
||||
_("ip to bind to locally")),
|
||||
('socket_timeout', 15,
|
||||
_("timeout for closing connections")),
|
||||
('close_with_rst', 0,
|
||||
_("close connections with RST and avoid the TCP TIME_WAIT state")),
|
||||
('save_dfile_interval', 5 * 60,
|
||||
_("seconds between saving dfile")),
|
||||
('timeout_downloaders_interval', 45 * 60,
|
||||
_("seconds between expiring downloaders")),
|
||||
('reannounce_interval', 30 * 60,
|
||||
_("seconds downloaders should wait between reannouncements")),
|
||||
('response_size', 50,
|
||||
_("default number of peers to send an info message to if the "
|
||||
"client does not specify a number")),
|
||||
('timeout_check_interval', 5,
|
||||
_("time to wait between checking if any connections have timed out")),
|
||||
('nat_check', 3,
|
||||
_("how many times to check if a downloader is behind a NAT "
|
||||
"(0 = don't check)")),
|
||||
('log_nat_checks', 0,
|
||||
_("whether to add entries to the log for nat-check results")),
|
||||
('min_time_between_log_flushes', 3.0,
|
||||
_("minimum time it must have been since the last flush to do "
|
||||
"another one")),
|
||||
('min_time_between_cache_refreshes', 600.0,
|
||||
_("minimum time in seconds before a cache is considered stale "
|
||||
"and is flushed")),
|
||||
('allowed_dir', '',
|
||||
_("only allow downloads for .torrents in this dir (and recursively in "
|
||||
"subdirectories of directories that have no .torrent files "
|
||||
"themselves). If set, torrents in this directory show up on "
|
||||
"infopage/scrape whether they have peers or not")),
|
||||
('parse_dir_interval', 60,
|
||||
_("how often to rescan the torrent directory, in seconds")),
|
||||
('allowed_controls', 0,
|
||||
_("allow special keys in torrents in the allowed_dir to affect "
|
||||
"tracker access")),
|
||||
('hupmonitor', 0,
|
||||
_("whether to reopen the log file upon receipt of HUP signal")),
|
||||
('show_infopage', 1,
|
||||
_("whether to display an info page when the tracker's root dir "
|
||||
"is loaded")),
|
||||
('infopage_redirect', '',
|
||||
_("a URL to redirect the info page to")),
|
||||
('show_names', 1,
|
||||
_("whether to display names from allowed dir")),
|
||||
('favicon', '',
|
||||
_("file containing x-icon data to return when browser requests "
|
||||
"favicon.ico")),
|
||||
('only_local_override_ip', 2,
|
||||
_("ignore the ip GET parameter from machines which aren't on "
|
||||
"local network IPs (0 = never, 1 = always, 2 = ignore if NAT "
|
||||
"checking is not enabled). HTTP proxy headers giving address "
|
||||
"of original client are treated the same as --ip.")),
|
||||
('logfile', '',
|
||||
_("file to write the tracker logs, use - for stdout (default)")),
|
||||
('allow_get', 0,
|
||||
_("use with allowed_dir; adds a /file?hash={hash} url that "
|
||||
"allows users to download the torrent file")),
|
||||
('keep_dead', 0,
|
||||
_("keep dead torrents after they expire (so they still show up on your "
|
||||
"/scrape and web page). Only matters if allowed_dir is not set")),
|
||||
('scrape_allowed', 'full',
|
||||
_("scrape access allowed (can be none, specific or full)")),
|
||||
('max_give', 200,
|
||||
_("maximum number of peers to give with any one request")),
|
||||
('twisted', -1,
|
||||
_("Use Twisted network libraries for network connections. 1 means use twisted, 0 means do not use twisted, -1 means autodetect, and prefer twisted")),
|
||||
]
|
||||
|
||||
def statefiletemplate(x):
    """Validate the structure of a bdecoded tracker state file.

    x must be a dict whose recognized keys are 'peers', 'completed',
    'allowed' and 'allowed_dir_files'.  Raises ValueError on the first
    structural problem found; returns None when the state looks sane.
    """
    if type(x) != DictType:
        raise ValueError
    for cname, cinfo in x.items():
        if cname == 'peers':
            for y in cinfo.values():      # The 'peers' key is a dictionary of SHA hashes (torrent ids)
                if type(y) != DictType:   # ... for the active torrents, and each is a dictionary
                    raise ValueError
                for peerid, info in y.items():    # ... of client ids interested in that torrent
                    if (len(peerid) != 20):       # peer ids are always exactly 20 bytes
                        raise ValueError
                    if type(info) != DictType:    # ... each of which is also a dictionary
                        raise ValueError          # ... which has an IP, a Port, and a Bytes Left count for that client for that torrent
                    if type(info.get('ip', '')) != StringType:
                        raise ValueError
                    port = info.get('port')
                    if type(port) not in (IntType, LongType) or port < 0:
                        raise ValueError
                    left = info.get('left')
                    if type(left) not in (IntType, LongType) or left < 0:
                        raise ValueError
        elif cname == 'completed':
            if (type(cinfo) != DictType):    # The 'completed' key is a dictionary of SHA hashes (torrent ids)
                raise ValueError             # ... for keeping track of the total completions per torrent
            for y in cinfo.values():         # ... each torrent has an integer value
                if type(y) not in (IntType,LongType):
                    raise ValueError         # ... for the number of reported completions for that torrent
        elif cname == 'allowed':
            if (type(cinfo) != DictType):    # a list of info_hashes and included data
                raise ValueError
            if x.has_key('allowed_dir_files'):
                adlist = [z[1] for z in x['allowed_dir_files'].values()]
                for y in cinfo.keys():       # and each should have a corresponding key here
                    if not y in adlist:
                        raise ValueError
        elif cname == 'allowed_dir_files':
            if (type(cinfo) != DictType):    # a list of files, their attributes and info hashes
                raise ValueError
            dirkeys = {}
            for y in cinfo.values():         # each entry should have a corresponding info_hash
                if not y[1]:
                    continue
                if not x['allowed'].has_key(y[1]):
                    raise ValueError
                if dirkeys.has_key(y[1]):    # and each should have a unique info_hash
                    raise ValueError
                dirkeys[y[1]] = 1
|
||||
|
||||
|
||||
# canned body returned with 404 responses when a torrent/file is unknown
alas = _("your file may exist elsewhere in the universe\nbut alas, not here\n")
|
||||
|
||||
def isotime(secs=None):
    """Format an epoch timestamp as 'YYYY-MM-DD HH:MM UTC'.

    secs defaults to the current time.
    """
    if secs is None:
        when = time()
    else:
        when = secs
    return strftime('%Y-%m-%d %H:%M UTC', gmtime(when))
|
||||
|
||||
# matches the trailing "for <ip>" clause of an HTTP Via header
http_via_filter = re.compile(' for ([0-9.]+)\Z')
|
||||
|
||||
def _get_forwarded_ip(headers):
    """Extract the original client address from proxy headers.

    Checks, in order of preference: X-Forwarded-For, Client-IP, Via and
    From.  Returns the raw header-derived string (NOT validated as an
    IP address -- callers must check) or None when no proxy header is
    present.
    """
    if headers.has_key('http_x_forwarded_for'):
        header = headers['http_x_forwarded_for']
        try:
            x,y = header.split(',')
        except ValueError:
            # not exactly two comma-separated entries; return it verbatim
            return header
        # prefer the first (originating) hop unless it is a private address
        if not is_local_ip(x):
            return x
        return y
    if headers.has_key('http_client_ip'):
        return headers['http_client_ip']
    if headers.has_key('http_via'):
        x = http_via_filter.search(headers['http_via'])
        # search() returns None when the Via header has no "for <ip>" part
        if x is not None:
            return x.group(1)
    if headers.has_key('http_from'):
        return headers['http_from']
    return None
|
||||
|
||||
def get_forwarded_ip(headers):
    """Return the proxied client IP from headers, or None.

    Only a syntactically valid, non-private IPv4 address is returned;
    anything else (missing header, hostname, LAN address) yields None.
    """
    candidate = _get_forwarded_ip(headers)
    if candidate is None:
        return None
    if not is_valid_ipv4(candidate) or is_local_ip(candidate):
        return None
    return candidate
|
||||
|
||||
def compact_peer_info(ip, port):
    """Encode ip:port in the 6-byte "compact" peer format.

    The first four characters are the dotted-quad octets, the last two
    the port in network byte order.  Returns '' when ip is not a valid
    dotted-quad IPv4 address (e.g. a domain name).
    """
    try:
        octets = [int(i) for i in ip.split('.')]
        if len(octets) != 4:
            return ''
        for n in octets:
            # explicit range check: py2's chr() raised ValueError past 255,
            # but relying on that is fragile
            if not 0 <= n <= 255:
                return ''
        s = ( ''.join([chr(n) for n in octets])
              + chr((port & 0xFF00) >> 8) + chr(port & 0xFF) )
    except ValueError:
        # not a valid IP, must be a domain name
        s = ''
    return s
|
||||
|
||||
def is_valid_ipv4(ip):
    """Return True if ip is a dotted-quad IPv4 address (four decimal
    fields, each 0-255)."""
    a = ip.split('.')
    if len(a) != 4:
        return False
    try:
        for x in a:
            # the original used chr(int(x)) to get a free 0-255 range check;
            # make the range explicit instead
            if not 0 <= int(x) <= 255:
                return False
        return True
    except ValueError:
        return False
|
||||
|
||||
def is_local_ip(ip):
    """Return 1 if ip falls in a private/loopback/link-local range
    (10/8, 127/8, 192.168/16, 169.254/16, 172.16/12), else 0.

    Malformed input is treated as non-local.
    """
    try:
        v = [int(x) for x in ip.split('.')]
        if v[0] == 10 or v[0] == 127 or v[:2] in ([192, 168], [169, 254]):
            return 1
        if v[0] == 172 and v[1] >= 16 and v[1] <= 31:
            return 1
    except (ValueError, IndexError):
        # IndexError: fewer than two fields (e.g. '172') -- previously
        # escaped the handler and crashed the caller
        return 0
    return 0
|
||||
|
||||
|
||||
class Tracker(object):
|
||||
|
||||
    def __init__(self, config, rawserver):
        """Create the tracker.

        config is the parsed option dict (see the module's defaults);
        rawserver is the event loop used to schedule the periodic
        state-save and peer-expiry tasks.  Any previously saved state is
        restored from config['dfile'].
        """
        self.config = config
        self.response_size = config['response_size']
        self.max_give = config['max_give']
        self.dfile = config['dfile']        # path of the persistent state file
        self.natcheck = config['nat_check']
        favicon = config['favicon']
        self.favicon = None
        if favicon:
            try:
                h = open(favicon,'r')
                self.favicon = h.read()
                h.close()
            except:
                print _("**warning** specified favicon file -- %s -- does not exist.") % favicon
        self.rawserver = rawserver
        self.cached = {}    # format: infohash: [[time1, l1, s1], [time2, l2, s2], [time3, l3, s3]]
        self.cached_t = {}  # format: infohash: [time, cache]
        self.times = {}     # infohash -> peerid -> time of last announce
        self.state = {}     # the bencodable dict that save_dfile persists
        self.seedcount = {} # infohash -> number of connected seeds

        self.only_local_override_ip = config['only_local_override_ip']
        if self.only_local_override_ip == 2:
            # mode 2 means "honour the ip= parameter only when NAT checking
            # is disabled"; resolve it to a plain boolean up front
            self.only_local_override_ip = not config['nat_check']

        if os.path.exists(self.dfile):
            try:
                h = open(self.dfile, 'rb')
                ds = h.read()
                h.close()
                tempstate = bdecode(ds)
                if not tempstate.has_key('peers'):
                    # old-format statefiles were just the peers dict
                    tempstate = {'peers': tempstate}
                statefiletemplate(tempstate)    # raises ValueError if malformed
                self.state = tempstate
            except:
                print _("**warning** statefile %s corrupt; resetting") % \
                    self.dfile
        self.downloads = self.state.setdefault('peers', {})
        self.completed = self.state.setdefault('completed', {})

        self.becache = {}   # format: infohash: [[l1, s1], [l2, s2], [l3, s3]]
        # rebuild the pre-encoded response caches and seed counts from the
        # restored peer records
        for infohash, ds in self.downloads.items():
            self.seedcount[infohash] = 0
            for x,y in ds.items():
                if not y.get('nat',-1):
                    ip = y.get('given_ip')
                    if not (ip and self.allow_local_override(y['ip'], ip)):
                        ip = y['ip']
                    self.natcheckOK(infohash,x,ip,y['port'],y['left'])
                if not y['left']:
                    self.seedcount[infohash] += 1

        for infohash in self.downloads:
            self.times[infohash] = {}
            for peerid in self.downloads[infohash]:
                # time 0: restored peers expire unless they reannounce
                self.times[infohash][peerid] = 0

        self.reannounce_interval = config['reannounce_interval']
        self.save_dfile_interval = config['save_dfile_interval']
        self.show_names = config['show_names']
        rawserver.add_task(self.save_dfile, self.save_dfile_interval)
        self.prevtime = time()
        self.timeout_downloaders_interval = config['timeout_downloaders_interval']
        rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
        self.logfile = None
        self.log = None
        if (config['logfile'] != '') and (config['logfile'] != '-'):
            try:
                self.logfile = config['logfile']
                self.log = open(self.logfile,'a')
                # all print-style logging in this module goes through stdout
                sys.stdout = self.log
                print _("# Log Started: "), isotime()
            except:
                print _("**warning** could not redirect stdout to log file: "), sys.exc_info()[0]

        if config['hupmonitor']:
            # reopen the log on SIGHUP so external log rotation works
            def huphandler(signum, frame, self = self):
                try:
                    self.log.close ()
                    self.log = open(self.logfile,'a')
                    sys.stdout = self.log
                    print _("# Log reopened: "), isotime()
                except:
                    print _("**warning** could not reopen logfile")

            signal.signal(signal.SIGHUP, huphandler)

        self.allow_get = config['allow_get']

        if config['allowed_dir'] != '':
            self.allowed_dir = config['allowed_dir']
            self.parse_dir_interval = config['parse_dir_interval']
            self.allowed = self.state.setdefault('allowed',{})
            self.allowed_dir_files = self.state.setdefault('allowed_dir_files',{})
            self.allowed_dir_blocked = {}
            self.parse_allowed()    # initial scan; reschedules itself
        else:
            # not restricted to an allowed_dir: drop any stale allowed info
            # that came in with a restored statefile
            try:
                del self.state['allowed']
            except:
                pass
            try:
                del self.state['allowed_dir_files']
            except:
                pass
            self.allowed = None

        # workaround for unquote() implementations that do not map '+' to ' '
        self.uq_broken = unquote('+') != ' '
        self.keep_dead = config['keep_dead']
|
||||
|
||||
def allow_local_override(self, ip, given_ip):
|
||||
return is_valid_ipv4(given_ip) and (
|
||||
not self.only_local_override_ip or is_local_ip(ip) )
|
||||
|
||||
    def get_infopage(self):
        """Render the HTML status page (or the configured redirect).

        Returns an HTTP response tuple (code, message, headers, body);
        any unexpected error is reported as a 500.
        """
        try:
            if not self.config['show_infopage']:
                return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
            red = self.config['infopage_redirect']
            if red != '':
                return (302, 'Found', {'Content-Type': 'text/html', 'Location': red},
                        '<A HREF="'+red+'">Click Here</A>')

            s = StringIO()
            s.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' \
                '<html><head><title>BitTorrent download info</title>\n')
            if self.favicon is not None:
                s.write('<link rel="shortcut icon" href="/favicon.ico">\n')
            s.write('</head>\n<body>\n' \
                '<h3>BitTorrent download info</h3>\n'\
                '<ul>\n'
                '<li><strong>tracker version:</strong> %s</li>\n' \
                '<li><strong>server time:</strong> %s</li>\n' \
                '</ul>\n' % (version, isotime()))
            # build (name, infohash) pairs for everything we may display;
            # name is None when names are hidden or unknown
            if self.allowed is not None:
                if self.show_names:
                    names = [ (value['name'], infohash)
                              for infohash, value in self.allowed.iteritems()]
                else:
                    names = [(None, infohash) for infohash in self.allowed]
            else:
                names = [ (None, infohash) for infohash in self.downloads]
            if not names:
                s.write('<p>not tracking any files yet...</p>\n')
            else:
                names.sort()
                tn = 0   # total completions (tracked torrents)
                tc = 0   # total seeds
                td = 0   # total downloaders
                tt = 0   # Total transferred
                ts = 0   # Total size
                nf = 0   # Number of files displayed
                # the named table has extra size/transferred columns
                if self.allowed is not None and self.show_names:
                    s.write('<table summary="files" border="1">\n' \
                        '<tr><th>info hash</th><th>torrent name</th><th align="right">size</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th><th align="right">transferred</th></tr>\n')
                else:
                    s.write('<table summary="files">\n' \
                        '<tr><th>info hash</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th></tr>\n')
                for name, infohash in names:
                    l = self.downloads[infohash]
                    n = self.completed.get(infohash, 0)
                    tn = tn + n
                    c = self.seedcount[infohash]
                    tc = tc + c
                    d = len(l) - c
                    td = td + d
                    nf = nf + 1
                    if self.allowed is not None and self.show_names:
                        if self.allowed.has_key(infohash):
                            sz = self.allowed[infohash]['length']  # size
                            ts = ts + sz
                            szt = sz * n   # Transferred for this torrent
                            tt = tt + szt
                            if self.allow_get == 1:
                                # link the name to the /file download url
                                linkname = '<a href="/file?info_hash=' + quote(infohash) + '">' + name + '</a>'
                            else:
                                linkname = name
                            s.write('<tr><td><code>%s</code></td><td>%s</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n' \
                                % (b2a_hex(infohash), linkname, size_format(sz), c, d, n, size_format(szt)))
                    else:
                        s.write('<tr><td><code>%s</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td></tr>\n' \
                            % (b2a_hex(infohash), c, d, n))
                # ttn counts completions over ALL torrents ever recorded,
                # not just the ones displayed above
                ttn = 0
                for i in self.completed.values():
                    ttn = ttn + i
                if self.allowed is not None and self.show_names:
                    s.write('<tr><td align="right" colspan="2">%i files</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i/%i</td><td align="right">%s</td></tr>\n'
                            % (nf, size_format(ts), tc, td, tn, ttn, size_format(tt)))
                else:
                    s.write('<tr><td align="right">%i files</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i/%i</td></tr>\n'
                            % (nf, tc, td, tn, ttn))
                s.write('</table>\n' \
                    '<ul>\n' \
                    '<li><em>info hash:</em> SHA1 hash of the "info" section of the metainfo (*.torrent)</li>\n' \
                    '<li><em>complete:</em> number of connected clients with the complete file</li>\n' \
                    '<li><em>downloading:</em> number of connected clients still downloading</li>\n' \
                    '<li><em>downloaded:</em> reported complete downloads (total: current/all)</li>\n' \
                    '<li><em>transferred:</em> torrent size * total downloaded (does not include partial transfers)</li>\n' \
                    '</ul>\n')

            s.write('</body>\n' \
                '</html>\n')
            return (200, 'OK', {'Content-Type': 'text/html; charset=iso-8859-1'}, s.getvalue())
        except:
            print_exc()
            return (500, 'Internal Server Error', {'Content-Type': 'text/html; charset=iso-8859-1'}, 'Server Error')
|
||||
|
||||
def scrapedata(self, infohash, return_name = True):
|
||||
l = self.downloads[infohash]
|
||||
n = self.completed.get(infohash, 0)
|
||||
c = self.seedcount[infohash]
|
||||
d = len(l) - c
|
||||
f = {'complete': c, 'incomplete': d, 'downloaded': n}
|
||||
if return_name and self.show_names and self.allowed is not None:
|
||||
f['name'] = self.allowed[infohash]['name']
|
||||
return (f)
|
||||
|
||||
    def get_scrape(self, paramslist):
        """Handle a /scrape request.

        paramslist maps query keys to lists of values.  With an
        'info_hash' parameter only the requested torrents are reported;
        otherwise a full scrape is produced.  Access is gated by
        config['scrape_allowed'] ('none', 'specific' or 'full').
        Returns an HTTP response tuple.
        """
        fs = {}
        if paramslist.has_key('info_hash'):
            # specific scrape: only the requested hashes
            if self.config['scrape_allowed'] not in ['specific', 'full']:
                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                    bencode({'failure reason':
                    _("specific scrape function is not available with this tracker.")}))
            for infohash in paramslist['info_hash']:
                if self.allowed is not None and infohash not in self.allowed:
                    continue    # silently skip unauthorized hashes
                if infohash in self.downloads:
                    fs[infohash] = self.scrapedata(infohash)
        else:
            # full scrape: everything allowed (or everything tracked)
            if self.config['scrape_allowed'] != 'full':
                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                    bencode({'failure reason':
                    _("full scrape function is not available with this tracker.")}))
            if self.allowed is not None:
                hashes = self.allowed
            else:
                hashes = self.downloads
            for infohash in hashes:
                fs[infohash] = self.scrapedata(infohash)

        return (200, 'OK', {'Content-Type': 'text/plain'}, bencode({'files': fs}))
|
||||
|
||||
def get_file(self, infohash):
|
||||
if not self.allow_get:
|
||||
return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
|
||||
_("get function is not available with this tracker."))
|
||||
if not self.allowed.has_key(infohash):
|
||||
return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
|
||||
fname = self.allowed[infohash]['file']
|
||||
fpath = self.allowed[infohash]['path']
|
||||
return (200, 'OK', {'Content-Type': 'application/x-bittorrent',
|
||||
'Content-Disposition': 'attachment; filename=' + fname},
|
||||
open(fpath, 'rb').read())
|
||||
|
||||
def check_allowed(self, infohash, paramslist):
|
||||
if self.allowed is not None:
|
||||
if not self.allowed.has_key(infohash):
|
||||
return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
|
||||
bencode({'failure reason':
|
||||
_("Requested download is not authorized for use with this tracker.")}))
|
||||
if self.config['allowed_controls']:
|
||||
if self.allowed[infohash].has_key('failure reason'):
|
||||
return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
|
||||
bencode({'failure reason': self.allowed[infohash]['failure reason']}))
|
||||
|
||||
return None
|
||||
|
||||
    def add_data(self, infohash, event, ip, paramslist):
        """Process one announce for infohash from the peer at ip.

        Validates the announce parameters (raising ValueError on garbage),
        then updates the peer / seed / completion bookkeeping and the
        NAT-check state for new, returning and stopping peers.  Returns
        the number of peers that should be handed back to this client.
        """
        peers = self.downloads.setdefault(infohash, {})
        ts = self.times.setdefault(infohash, {})
        self.completed.setdefault(infohash, 0)
        self.seedcount.setdefault(infohash, 0)

        def params(key, default = None, l = paramslist):
            # first value for a query key, or default
            if l.has_key(key):
                return l[key][0]
            return default

        # -- parameter validation ------------------------------------------
        myid = params('peer_id','')
        if len(myid) != 20:
            raise ValueError, 'id not of length 20'
        if event not in ['started', 'completed', 'stopped', 'snooped', None]:
            raise ValueError, 'invalid event'
        port = int(params('port',''))
        if port < 0 or port > 65535:
            raise ValueError, 'invalid port'
        left = int(params('left',''))
        if left < 0:
            raise ValueError, 'invalid amount left'

        peer = peers.get(myid)
        mykey = params('key')
        # a returning peer must present the same key or the same address to
        # be allowed to change its record
        auth = not peer or peer.get('key', -1) == mykey or peer.get('ip') == ip

        gip = params('ip')      # client-claimed address, if any
        local_override = gip and self.allow_local_override(ip, gip)
        if local_override:
            ip1 = gip
        else:
            ip1 = ip
        if not auth and local_override and self.only_local_override_ip:
            auth = True

        if params('numwant') is not None:
            rsize = min(int(params('numwant')), self.max_give)
        else:
            rsize = self.response_size

        if event == 'stopped':
            if peer and auth:
                self.delete_peer(infohash,myid)

        elif not peer:
            # -- brand new peer --------------------------------------------
            ts[myid] = time()
            peer = {'ip': ip, 'port': port, 'left': left}
            if mykey:
                peer['key'] = mykey
            if gip:
                peer['given ip'] = gip
            if port:
                if not self.natcheck or (local_override and self.only_local_override_ip):
                    # trusted immediately: mark reachable and cache it
                    peer['nat'] = 0
                    self.natcheckOK(infohash,myid,ip1,port,left)
                else:
                    NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver)
            else:
                # port 0: peer cannot accept connections at all
                peer['nat'] = 2**30
            if event == 'completed':
                self.completed[infohash] += 1
            if not left:
                self.seedcount[infohash] += 1

            peers[myid] = peer

        else:
            # -- returning peer --------------------------------------------
            if not auth:
                return rsize    # return w/o changing stats

            ts[myid] = time()
            if not left and peer['left']:
                # downloader just became a seed
                self.completed[infohash] += 1
                self.seedcount[infohash] += 1
                if not peer.get('nat', -1):
                    # move its cached entries from the downloader side [0]
                    # to the seed side [1]
                    for bc in self.becache[infohash]:
                        bc[1][myid] = bc[0][myid]
                        del bc[0][myid]
            if peer['left']:
                peer['left'] = left

            # detect address changes that invalidate the NAT-check result
            recheck = False
            if ip != peer['ip']:
                peer['ip'] = ip
                recheck = True
            if gip != peer.get('given ip'):
                if gip:
                    peer['given ip'] = gip
                elif peer.has_key('given ip'):
                    del peer['given ip']
                if local_override:
                    if self.only_local_override_ip:
                        self.natcheckOK(infohash,myid,ip1,port,left)
                    else:
                        recheck = True

            if port and self.natcheck:
                if recheck:
                    if peer.has_key('nat'):
                        if not peer['nat']:
                            # drop stale cache entries before re-probing
                            l = self.becache[infohash]
                            y = not peer['left']
                            for x in l:
                                del x[y][myid]
                        del peer['nat'] # restart NAT testing
                else:
                    # retry failed checks until the configured probe limit
                    natted = peer.get('nat', -1)
                    if natted and natted < self.natcheck:
                        recheck = True

                if recheck:
                    NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver)

        return rsize
|
||||
|
||||
    def peerlist(self, infohash, stopped, is_seed, return_type, rsize):
        """Build the announce-response dict for one client.

        stopped: client sent event=stopped, so no peers are returned.
        is_seed: client has the whole file, so other seeds are withheld
        when possible.  return_type selects the peer encoding
        (0 = full dicts with peer id, 1 = dicts without peer id,
        2 = compact 6-byte strings); rsize caps the number of peers.
        """
        data = {}    # return data
        seeds = self.seedcount[infohash]
        data['complete'] = seeds
        data['incomplete'] = len(self.downloads[infohash]) - seeds

        if ( self.allowed is not None and self.config['allowed_controls'] and
                                self.allowed[infohash].has_key('warning message') ):
            data['warning message'] = self.allowed[infohash]['warning message']

        data['interval'] = self.reannounce_interval
        if stopped or not rsize:     # save some bandwidth
            data['peers'] = []
            return data

        # becache layout per torrent: [layout][0]=downloaders, [1]=seeds
        bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
        len_l = len(bc[0][0])
        len_s = len(bc[0][1])
        if not (len_l+len_s):   # caches are empty!
            data['peers'] = []
            return data
        # number of downloaders to hand out, proportional to their share
        l_get_size = int(float(rsize)*(len_l)/(len_l+len_s))
        # reuse a shuffled snapshot of the caches until it is stale or
        # too depleted to satisfy this request
        cache = self.cached.setdefault(infohash,[None,None,None])[return_type]
        if cache:
            if cache[0] + self.config['min_time_between_cache_refreshes'] < time():
                cache = None
            else:
                if ( (is_seed and len(cache[1]) < rsize)
                     or len(cache[1]) < l_get_size or not cache[1] ):
                    cache = None
        if not cache:
            # rebuild: cache = [timestamp, shuffled downloaders, shuffled seeds]
            vv = [[],[],[]]
            cache = [ time(),
                      bc[return_type][0].values()+vv[return_type],
                      bc[return_type][1].values() ]
            shuffle(cache[1])
            shuffle(cache[2])
            self.cached[infohash][return_type] = cache
            for rr in xrange(len(self.cached[infohash])):
                if rr != return_type:
                    try:
                        self.cached[infohash][rr][1].extend(vv[rr])
                    except:
                        pass
        if len(cache[1]) < l_get_size:
            # not enough downloaders left: drain everything
            peerdata = cache[1]
            if not is_seed:
                peerdata.extend(cache[2])
            cache[1] = []
            cache[2] = []
        else:
            if not is_seed:
                # take the seed portion first, then top up with downloaders
                peerdata = cache[2][l_get_size-rsize:]
                del cache[2][l_get_size-rsize:]
                rsize -= len(peerdata)
            else:
                peerdata = []
            if rsize:
                peerdata.extend(cache[1][-rsize:])
                del cache[1][-rsize:]
        if return_type == 2:
            # compact format is one concatenated string
            peerdata = ''.join(peerdata)
        data['peers'] = peerdata
        return data
|
||||
|
||||
    def get(self, connection, path, headers):
        """HTTP dispatch: handle one GET for the info page, scrape, file
        download, favicon or announce.

        Returns (code, message, headers, body) for the HTTP layer;
        malformed announce parameters produce a 400.
        """
        ip = connection.get_ip()

        # honour proxy forwarding headers unless overrides are restricted
        nip = get_forwarded_ip(headers)
        if nip and not self.only_local_override_ip:
            ip = nip

        paramslist = {}
        def params(key, default = None, l = paramslist):
            # first value for a query key, or default
            if l.has_key(key):
                return l[key][0]
            return default

        try:
            (scheme, netloc, path, pars, query, fragment) = urlparse(path)
            if self.uq_broken == 1:
                # unquote() here does not map '+' to space; do it ourselves
                path = path.replace('+',' ')
                query = query.replace('+',' ')
            path = unquote(path)[1:]
            for s in query.split('&'):
                if s != '':
                    i = s.index('=')
                    kw = unquote(s[:i])
                    paramslist.setdefault(kw, [])
                    paramslist[kw] += [unquote(s[i+1:])]

            if path == '' or path == 'index.html':
                return self.get_infopage()
            if path == 'scrape':
                return self.get_scrape(paramslist)
            if (path == 'file'):
                return self.get_file(params('info_hash'))
            if path == 'favicon.ico' and self.favicon is not None:
                return (200, 'OK', {'Content-Type' : 'image/x-icon'}, self.favicon)
            if path != 'announce':
                return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)

            # main tracker function
            infohash = params('info_hash')
            if not infohash:
                raise ValueError, 'no info hash'

            notallowed = self.check_allowed(infohash, paramslist)
            if notallowed:
                return notallowed

            event = params('event')

            rsize = self.add_data(infohash, event, ip, paramslist)

        except ValueError, e:
            return (400, 'Bad Request', {'Content-Type': 'text/plain'},
                'you sent me garbage - ' + str(e))

        # pick the peer encoding the client asked for
        if params('compact'):
            return_type = 2
        elif params('no_peer_id'):
            return_type = 1
        else:
            return_type = 0

        data = self.peerlist(infohash, event=='stopped', not params('left'),
                             return_type, rsize)

        if paramslist.has_key('scrape'):
            # piggyback scrape data on the announce response
            data['scrape'] = self.scrapedata(infohash, False)

        return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode(data))
|
||||
|
||||
    def natcheckOK(self, infohash, peerid, ip, port, not_seed):
        """Record a reachable (non-NATed) peer in the response caches.

        Pre-encodes the peer into all three becache layouts: [0] full
        dicts (with peer id), [1] dicts without peer id, [2] compact
        6-byte strings.  Within each layout index 0 holds downloaders and
        index 1 seeds; not_seed is the peer's 'left' count, so a falsy
        value means seed.
        """
        bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
        bc[0][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port,
                                                        'peer id': peerid}))
        bc[1][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port}))
        bc[2][not not_seed][peerid] = compact_peer_info(ip, port)
|
||||
|
||||
    def natchecklog(self, peerid, ip, port, result):
        """Write one Apache-style access-log line for a NAT-check probe.

        result is the HTTP-like status code recorded for the probe
        (200 reachable, 503 unreachable, 404 stale record).
        """
        year, month, day, hour, minute, second, a, b, c = localtime(time())
        print '%s - %s [%02d/%3s/%04d:%02d:%02d:%02d] "!natcheck-%s:%i" %i 0 - -' % (
            ip, quote(peerid), day, months[month], year, hour, minute, second,
            ip, port, result)
|
||||
|
||||
    def connectback_result(self, result, downloadid, peerid, ip, port):
        """Callback from NatCheck with the outcome of a connect-back probe.

        result is true when the tracker managed to open a connection back
        to the peer.  Updates the peer record's 'nat' field (0 means
        reachable, otherwise the number of failed probes so far) and the
        response caches; probes whose peer record no longer matches the
        probed address are ignored as stale.
        """
        record = self.downloads.get(downloadid, {}).get(peerid)
        if ( record is None
                 or (record['ip'] != ip and record.get('given ip') != ip)
                 or record['port'] != port ):
            # peer went away or re-announced from elsewhere: stale probe
            if self.config['log_nat_checks']:
                self.natchecklog(peerid, ip, port, 404)
            return
        if self.config['log_nat_checks']:
            if result:
                x = 200
            else:
                x = 503
            self.natchecklog(peerid, ip, port, x)
        if not record.has_key('nat'):
            # first probe result for this peer
            record['nat'] = int(not result)
            if result:
                self.natcheckOK(downloadid,peerid,ip,port,record['left'])
        elif result and record['nat']:
            # previously failing peer turned out reachable after all
            record['nat'] = 0
            self.natcheckOK(downloadid,peerid,ip,port,record['left'])
        elif not result:
            record['nat'] += 1
|
||||
|
||||
def save_dfile(self):
|
||||
self.rawserver.add_task(self.save_dfile, self.save_dfile_interval)
|
||||
h = open(self.dfile, 'wb')
|
||||
h.write(bencode(self.state))
|
||||
h.close()
|
||||
|
||||
    def parse_allowed(self):
        """(Re)scan allowed_dir for .torrent files; reschedules itself to
        run again after parse_dir_interval."""
        self.rawserver.add_task(self.parse_allowed, self.parse_dir_interval)

        # logging broken .torrent files would be useful but could confuse
        # programs parsing log files, so errors are just ignored for now
        def ignore(message):
            pass
        r = parsedir(self.allowed_dir, self.allowed, self.allowed_dir_files,
                     self.allowed_dir_blocked, ignore,include_metainfo = False)
        ( self.allowed, self.allowed_dir_files, self.allowed_dir_blocked,
            added, garbage2 ) = r

        # start tracking any newly discovered torrents
        for infohash in added:
            self.downloads.setdefault(infohash, {})
            self.completed.setdefault(infohash, 0)
            self.seedcount.setdefault(infohash, 0)

        # keep the persisted state in sync with the rescan results
        self.state['allowed'] = self.allowed
        self.state['allowed_dir_files'] = self.allowed_dir_files
|
||||
|
||||
    def delete_peer(self, infohash, peerid):
        """Remove one peer from all per-torrent bookkeeping."""
        dls = self.downloads[infohash]
        peer = dls[peerid]
        if not peer['left']:
            self.seedcount[infohash] -= 1
        if not peer.get('nat',-1):
            # the peer passed its NAT check, so it has entries in the
            # response caches; drop it from the matching side (0 =
            # downloaders, 1 = seeds) of every cache layout
            l = self.becache[infohash]
            y = not peer['left']
            for x in l:
                del x[y][peerid]
        del self.times[infohash][peerid]
        del dls[peerid]
|
||||
|
||||
    def expire_downloaders(self):
        """Periodic task: drop peers that have not announced since the
        previous pass, then (unless keep_dead is set) forget torrents
        left with no peers.  Reschedules itself."""
        # .items() snapshots the dicts (Python 2), so deleting entries
        # while looping is safe here
        for infohash, peertimes in self.times.items():
            for myid, t in peertimes.items():
                if t < self.prevtime:
                    self.delete_peer(infohash, myid)
        self.prevtime = time()
        if (self.keep_dead != 1):
            # allowed torrents stay listed even when empty
            for key, peers in self.downloads.items():
                if len(peers) == 0 and (self.allowed is None or
                                        key not in self.allowed):
                    del self.times[key]
                    del self.downloads[key]
                    del self.seedcount[key]
        self.rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
|
||||
|
||||
def track(args):
    """Command-line entry point: parse args (printing usage when empty),
    run the tracker's server loop until it exits, then save state."""
    if len(args) == 0:
        print formatDefinitions(defaults, 80)
        return
    try:
        config, files = parseargs(args, defaults, 0, 0)
    except ValueError, e:
        print _("error: ") + str(e)
        print _("run with no arguments for parameter explanations")
        return
    r = RawServer(Event(), config)
    t = Tracker(config, r)
    s = r.create_serversocket(config['port'], config['bind'], True)
    r.start_listening(s, HTTPHandler(t.get, config['min_time_between_log_flushes']))
    r.listen_forever()
    # final state save after the event loop terminates
    t.save_dfile()
    print _("# Shutting down: ") + isotime()
|
||||
|
||||
def size_format(s):
    """Format a byte count as a human-readable string using binary
    units (B, KiB, MiB, GiB, TiB); GiB/TiB keep two decimal places."""
    if s < 1024:
        return str(s) + 'B'
    if s < 1048576:
        return str(int(s/1024)) + 'KiB'
    if s < 1073741824:
        return str(int(s/1048576)) + 'MiB'
    if s < 1099511627776:
        return str(int((s/1073741824.0)*100.0)/100.0) + 'GiB'
    return str(int((s/1099511627776.0)*100.0)/100.0) + 'TiB'
|
164
BitTorrent/zurllib.py
Executable file
164
BitTorrent/zurllib.py
Executable file
@ -0,0 +1,164 @@
|
||||
#
|
||||
# zurllib.py
|
||||
#
|
||||
# This is (hopefully) a drop-in for urllib which will request gzip/deflate
|
||||
# compression and then decompress the output if a compressed response is
|
||||
# received while maintaining the API.
|
||||
#
|
||||
# by Robert Stone 2/22/2003
|
||||
# extended by Matt Chisholm
|
||||
#
|
||||
|
||||
from BitTorrent.platform import user_agent
|
||||
import urllib2
|
||||
# Subclass urllib2's OpenerDirector so every opener built after this
# module is imported identifies itself with our client User-agent.
OldOpenerDirector = urllib2.OpenerDirector

class MyOpenerDirector(OldOpenerDirector):
    def __init__(self):
        OldOpenerDirector.__init__(self)
        # replace urllib2's default 'Python-urllib/x.y' header
        server_version = user_agent
        self.addheaders = [('User-agent', server_version)]

# install the subclass into urllib2 before the star-imports below
# re-export its names
urllib2.OpenerDirector = MyOpenerDirector

del urllib2
|
||||
|
||||
from urllib import *
|
||||
from urllib2 import *
|
||||
from gzip import GzipFile
|
||||
from StringIO import StringIO
|
||||
import pprint
|
||||
|
||||
DEBUG=0
|
||||
|
||||
|
||||
class HTTPContentEncodingHandler(HTTPHandler):
    """Inherit and add gzip/deflate/etc support to HTTP gets."""
    def http_open(self, req):
        # add the Accept-Encoding header to the request
        # support gzip encoding (identity is assumed)
        req.add_header("Accept-Encoding","gzip")
        if DEBUG:
            print "Sending:"
            print req.headers
            print "\n"
        fp = HTTPHandler.http_open(self,req)
        headers = fp.headers
        if DEBUG:
            pprint.pprint(headers.dict)
        url = fp.url
        # wrap the raw response so reads transparently decompress when the
        # server actually sent gzip
        resp = addinfourldecompress(fp, headers, url)
        # propagate the status attributes urllib2 callers expect
        if hasattr(fp, 'code'):
            resp.code = fp.code
        if hasattr(fp, 'msg'):
            resp.msg = fp.msg
        return resp
|
||||
|
||||
|
||||
class addinfourldecompress(addinfourl):
    """Do gzip decompression if necessary. Do addinfourl stuff too."""
    def __init__(self, fp, headers, url):
        # we need to do something more sophisticated here to deal with
        # multiple values?  What about other weird crap like q-values?
        # basically this only works for the most simplistic case and will
        # break in some other cases, but for now we only care about making
        # this work with the BT tracker so....
        if headers.has_key('content-encoding') and headers['content-encoding'] == 'gzip':
            if DEBUG:
                print "Contents of Content-encoding: " + headers['Content-encoding'] + "\n"
            self.gzip = 1
            # keep the raw stream so close() can release it too
            self.rawfp = fp
            fp = GzipStream(fp)
        else:
            self.gzip = 0
        return addinfourl.__init__(self, fp, headers, url)

    def close(self):
        # close the (possibly decompressing) stream, and the raw socket
        # stream underneath it when we wrapped one
        self.fp.close()
        if self.gzip:
            self.rawfp.close()

    def iscompressed(self):
        # 1 when the response arrived gzip-compressed, else 0
        return self.gzip
|
||||
|
||||
class GzipStream(StringIO):
    """Magically decompress a file object.

    This is not the most efficient way to do this but GzipFile() wants
    to seek, etc, which won't work for a stream such as that from a socket.
    So we copy the whole shebang info a StringIO object, decompress that
    then let people access the decompressed output as a StringIO object.

    The disadvantage is memory use and the advantage is random access.

    Will mess with fixing this later.
    """

    def __init__(self, fp):
        # keep the source stream so close() can release it
        self.fp = fp

        # this is nasty and needs to be fixed at some point
        # copy everything into a StringIO (compressed) because GzipFile
        # needs a seekable file object and fp (a socket stream) is not
        compressed = StringIO()
        r = fp.read()
        while r:
            compressed.write(r)
            r = fp.read()
        # now, unzip (gz) the StringIO; collect chunks in a list and join
        # once at the end -- the old `str += r` loop was quadratic and
        # shadowed the builtin str
        compressed.seek(0, 0)
        gz = GzipFile(fileobj = compressed)
        chunks = []
        r = gz.read()
        while r:
            chunks.append(r)
            r = gz.read()
        # close our utility files
        compressed.close()
        gz.close()
        # init our stringio selves with the decompressed data
        StringIO.__init__(self, ''.join(chunks))

    def close(self):
        # close the underlying source as well as our own buffer
        self.fp.close()
        return StringIO.close(self)
|
||||
|
||||
|
||||
def test():
|
||||
"""Test this module.
|
||||
|
||||
At the moment this is lame.
|
||||
"""
|
||||
|
||||
print "Running unit tests.\n"
|
||||
|
||||
def printcomp(fp):
|
||||
try:
|
||||
if fp.iscompressed():
|
||||
print "GET was compressed.\n"
|
||||
else:
|
||||
print "GET was uncompressed.\n"
|
||||
except:
|
||||
print "no iscompressed function! this shouldn't happen"
|
||||
|
||||
print "Trying to GET a compressed document...\n"
|
||||
fp = urlopen('http://a.scarywater.net/hng/index.shtml')
|
||||
print fp.read()
|
||||
printcomp(fp)
|
||||
fp.close()
|
||||
|
||||
print "Trying to GET an unknown document...\n"
|
||||
fp = urlopen('http://www.otaku.org/')
|
||||
print fp.read()
|
||||
printcomp(fp)
|
||||
fp.close()
|
||||
|
||||
|
||||
#
# Install the HTTPContentEncodingHandler that we've defined above.
#
# Module-level side effect: every subsequent urlopen() in this process
# goes through the gzip-aware handler. ProxyHandler({}) disables any
# environment-configured proxies.
install_opener(build_opener(HTTPContentEncodingHandler, ProxyHandler({})))

if __name__ == '__main__':
    # Running the module directly exercises the live-network smoke test.
    test()
|
||||
|
10
Foundation/Conversion.py
Executable file
10
Foundation/Conversion.py
Executable file
@ -0,0 +1,10 @@
|
||||
"""
|
||||
Compatibility module
|
||||
"""
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
"import PyObjCTools.Conversion instead of Foundation.Conversion",
|
||||
DeprecationWarning)
|
||||
|
||||
from PyObjCTools.Conversion import *
|
BIN
Foundation/_Foundation.so
Executable file
BIN
Foundation/_Foundation.so
Executable file
Binary file not shown.
49
Foundation/__init__.py
Executable file
49
Foundation/__init__.py
Executable file
@ -0,0 +1,49 @@
|
||||
# Foundation package bootstrap: load the Objective-C Foundation framework
# and inject its classes into this module's namespace via objc.loadBundle.
import objc as _objc
from _Foundation import *

# Same operation as Foundation's NSClassFromString(), via PyObjC.
NSClassFromString = _objc.lookUpClass

# Do something smart to collect Foundation classes...

if _objc.platform == 'MACOSX':
    # On Mac OS X proper the framework can be located by bundle identifier.
    _objc.loadBundle(
        'Foundation',
        globals(),
        bundle_identifier=u'com.apple.Foundation',
    )
else:
    # Elsewhere (e.g. GNUstep-style layouts) fall back to an explicit
    # framework path.
    _objc.loadBundle(
        'Foundation',
        globals(),
        bundle_path=_objc.pathForFramework(
            u'/System/Library/Frameworks/Foundation.framework',
        ),
    )
||||
|
||||
def _initialize():
    # Honor the PYOBJCFRAMEWORKS environment variable: a colon-separated
    # list of framework bundle paths. Each one is loaded, its Resources
    # directory is prepended to sys.path (preserving list order), and an
    # optional Init.py resource is executed in this module's namespace.
    import sys, os
    if 'PYOBJCFRAMEWORKS' in os.environ:
        paths = os.environ['PYOBJCFRAMEWORKS'].split(":")
        count = 0
        for path in paths:
            bundle = NSBundle.bundleWithPath_(path)
            # Force the bundle's code to load before touching resources.
            bundle.principalClass()
            sys.path.insert(count, str(bundle.resourcePath()))
            count = count + 1

            # NOTE(review): original indentation was lost; this Init.py
            # hook is placed inside the loop (per-bundle), matching
            # upstream PyObjC — confirm against the original file.
            initPath = bundle.pathForResource_ofType_( "Init", "py")
            if initPath:
                execfile(initPath, globals(), locals())
|
||||
|
||||
# Run the PYOBJCFRAMEWORKS bootstrap at import time.
_initialize()

import protocols # no need to export these, just register with PyObjC

#
# (informal) protocols exported for b/w compatibility
#
from protocols import NSConnectionDelegateMethods, \
    NSDistantObjectRequestMethods, \
    NSCopyLinkMoveHandler, NSKeyedArchiverDelegate, \
    NSKeyedUnarchiverDelegate, NSNetServiceDelegateMethods, \
    NSNetServiceBrowserDelegateMethods, NSPortDelegateMethods
|
1856
Foundation/protocols.py
Executable file
1856
Foundation/protocols.py
Executable file
File diff suppressed because it is too large
Load Diff
1
LICENSE
Normal file
1
LICENSE
Normal file
@ -0,0 +1 @@
|
||||
Creative Commons Attribution-Noncommercial 3.0 United States License
|
4
README
Normal file
4
README
Normal file
@ -0,0 +1,4 @@
|
||||
This is the source code behind the Python engine of the now-dead podcatching application iPodderX.
|
||||
|
||||
I open-sourced this code on my blog a while ago, but now that GitHub exists I thought it better to post it here for everyone
to enjoy and learn from.
|
BIN
btlocale/fr/LC_MESSAGES/bittorrent.mo
Executable file
BIN
btlocale/fr/LC_MESSAGES/bittorrent.mo
Executable file
Binary file not shown.
2625
btlocale/fr/LC_MESSAGES/bittorrent.po
Executable file
2625
btlocale/fr/LC_MESSAGES/bittorrent.po
Executable file
File diff suppressed because it is too large
Load Diff
BIN
btlocale/he_IL/LC_MESSAGES/bittorrent.mo
Executable file
BIN
btlocale/he_IL/LC_MESSAGES/bittorrent.mo
Executable file
Binary file not shown.
2583
btlocale/he_IL/LC_MESSAGES/bittorrent.po
Executable file
2583
btlocale/he_IL/LC_MESSAGES/bittorrent.po
Executable file
File diff suppressed because it is too large
Load Diff
BIN
btlocale/it/LC_MESSAGES/bittorrent.mo
Executable file
BIN
btlocale/it/LC_MESSAGES/bittorrent.mo
Executable file
Binary file not shown.
2802
btlocale/it/LC_MESSAGES/bittorrent.po
Executable file
2802
btlocale/it/LC_MESSAGES/bittorrent.po
Executable file
File diff suppressed because it is too large
Load Diff
BIN
btlocale/no/LC_MESSAGES/bittorrent.mo
Executable file
BIN
btlocale/no/LC_MESSAGES/bittorrent.mo
Executable file
Binary file not shown.
2766
btlocale/no/LC_MESSAGES/bittorrent.po
Executable file
2766
btlocale/no/LC_MESSAGES/bittorrent.po
Executable file
File diff suppressed because it is too large
Load Diff
BIN
btlocale/pt_BR/LC_MESSAGES/bittorrent.mo
Executable file
BIN
btlocale/pt_BR/LC_MESSAGES/bittorrent.mo
Executable file
Binary file not shown.
2783
btlocale/pt_BR/LC_MESSAGES/bittorrent.po
Executable file
2783
btlocale/pt_BR/LC_MESSAGES/bittorrent.po
Executable file
File diff suppressed because it is too large
Load Diff
2591
feedparser.py
Executable file
2591
feedparser.py
Executable file
File diff suppressed because it is too large
Load Diff
177
iPXAgent.py
Executable file
177
iPXAgent.py
Executable file
@ -0,0 +1,177 @@
|
||||
#(c) 2004-2008 Thunderstone Media, LLC
|
||||
#Creative Commons Attribution-Noncommercial 3.0 United States License
|
||||
#
|
||||
#Python Development By:
|
||||
#
|
||||
#Ray Slakinski
|
||||
#August Trometer
|
||||
|
||||
import iPXSettings
|
||||
import iPXClass
|
||||
from iPXTools import *
|
||||
|
||||
def usage():
    """Emit the command-line help text, one printMSG() call per entry."""
    help_entries = (
        '\r\n%s' % iPXSettings.USER_AGENT,
        '-help\r\nPrints this help screen',
        '\r\n-url=="<feedURL>"\r\nGets just the specified feed URL (must exist in feeds.plist)',
        '\r\n-id=="<feedID>"\r\nGets just the feed URL associated to the feed ID specified',
        '\r\n-getEnc=="<feedID>;;<entryGUID>;;<encURL>"\r\nGets just the secified enclosure',
        '\r\n-updateApps=="<feedID>;;<entryGUID>;;<encGUID>"\r\nAdds the file into the appropriete application',
        '\r\n-progName=="<programName>"\r\nTells the engine where to find program files',
        '\r\n-ui\r\nIf the GUI is running the command then it should specifiy this param',
        '\r\n-initSmartSpace\r\nInitializes cache files for SmartSpace',
        '\r\n-debug\r\nEnables printing of debug messages to the console',
        '\r\n-superdebug\r\nEnables printing of debug messages and HTTP traffic information to the console\r\n',
    )
    for entry in help_entries:
        printMSG(entry)
||||
|
||||
def firstInit():
    # One-time startup housekeeping: load settings, ensure the working
    # directories exist, repair a corrupt history file, trim the log and
    # detect any system proxy.
    import os

    iPXSettings.initSettings()
    checkDir(iPXSettings.tmpDownloadDir)

    # A directory where the history *file* should be means corruption:
    # wipe it and reset every feed's downloadBehavior.
    if os.path.isdir(iPXSettings.historyFile):
        logIt('History is a directory? - Resetting history')
        delTree(iPXSettings.historyFile)
        os.removedirs(iPXSettings.historyFile)
        # NOTE(review): original indentation was lost; this plist reset is
        # placed inside the corrupt-history branch (resetting behavior only
        # after the history was wiped) — confirm against the original file.
        try:
            #Resetting downloadBehavior
            logIt('Resetting downloadBehavior to 3')
            FeedListPrefs = plistlib.Plist.fromFile(file('%sfeeds.plist' % iPXSettings.rssPath))
            feedDetails = FeedListPrefs['iPodderXFeeds']

            for feed in feedDetails:
                feed['downloadBehavior'] = 3

            FeedListPrefs.write('%sfeeds.plist' % iPXSettings.rssPath)
        except Exception, msg:
            logIt('ERRMSG: %s' % str(msg))

    checkDir(iPXSettings.logPath)
    checkDir(iPXSettings.downloadDirectory)
    checkDir(iPXSettings.rssPath)
    checkDir('%sfeedData/' % iPXSettings.rssPath)
    trimLog()
    iPXSettings.globalProxySetting = setProxy()
    if len(iPXSettings.globalProxySetting) > 0:
        logIt('Proxy Server Detected...')
|
||||
def main():
|
||||
import re, sys
|
||||
|
||||
for arg in sys.argv:
|
||||
if re.search('-help', arg, re.IGNORECASE):
|
||||
gotFeedAlready = True
|
||||
usage()
|
||||
sys.exit()
|
||||
|
||||
iPXSettings.progName = 'iPodderX'
|
||||
for arg in sys.argv:
|
||||
if re.search('-progName', arg, re.IGNORECASE):
|
||||
progNameSplit = arg.split('==')
|
||||
iPXSettings.progName = progNameSplit[len(progNameSplit)-1]
|
||||
|
||||
firstInit()
|
||||
for arg in sys.argv:
|
||||
if arg == '-debug':
|
||||
printMSG( '--- DEBUG ENABLED ---')
|
||||
iPXSettings.DEBUG = 1
|
||||
elif arg == '-superdebug':
|
||||
printMSG( '--- SUPER DEBUG ENABLED ---')
|
||||
import httplib
|
||||
httplib.HTTPConnection.debuglevel = 1
|
||||
iPXSettings.DEBUG = 1
|
||||
iPXSettings.SUPERDEBUG = 1
|
||||
|
||||
try:
|
||||
import psyco
|
||||
logIt('Enabling Psyco JIT Compiler...')
|
||||
psyco.full()
|
||||
print passed
|
||||
except Exception, msg:
|
||||
pass
|
||||
|
||||
logIt('\r\n%s Feed Scan Started: %s' % (iPXSettings.USER_AGENT, strftime('%H:%M:%S -- %m/%d/%Y',localtime())))
|
||||
logIt('Platform: %s\r\n' % sys.platform)
|
||||
if checkReg() == 0:
|
||||
logIt('%s is UNREGISTERED' % iPXSettings.USER_AGENT)
|
||||
printMSG('APPLICATION_UNREGISTERED\r\n')
|
||||
sys.exit(0)
|
||||
|
||||
logIt('Params used to launch:')
|
||||
for arg in sys.argv:
|
||||
if not arg == sys.argv[0]:
|
||||
logIt(str(arg))
|
||||
|
||||
#Checking to see if the script was run from iPodderX.app or not...
|
||||
for arg in sys.argv:
|
||||
if re.search('-ui', arg, re.IGNORECASE):
|
||||
iPXSettings.ranFromUI = True
|
||||
logIt('Feed check being run from UI')
|
||||
if iPXSettings.ranFromUI == False:
|
||||
if checkForScript() == True:
|
||||
sys.exit()
|
||||
pass
|
||||
|
||||
gotFeedAlready = False
|
||||
feedsObj = iPXClass.Feeds()
|
||||
if len(feedsObj.feedList) > 0:
|
||||
for arg in sys.argv:
|
||||
if re.search('-initSmartSpace', arg, re.IGNORECASE):
|
||||
from iPXQuotaManager import getAllowedDelList
|
||||
printMSG('Generating SmartSpace Data...')
|
||||
delTree('%s/iPXCache/' % iPXSettings.tempDir)
|
||||
checkDir('%s/iPXCache/' % iPXSettings.tempDir)
|
||||
gotFeedAlready = True
|
||||
x, y = getAllowedDelList()
|
||||
break
|
||||
if re.search('-url==', arg, re.IGNORECASE):
|
||||
gotFeedAlready = True
|
||||
argsSplit = arg.split('==')
|
||||
feedsObj.retrFeed(argsSplit[1], 0)
|
||||
if re.search('-id==', arg, re.IGNORECASE):
|
||||
gotFeedAlready = True
|
||||
argsSplit = arg.split('==')
|
||||
feedsObj.retrFeed(argsSplit[1], 1)
|
||||
if re.search('-getEnc==', arg, re.IGNORECASE):
|
||||
gotFeedAlready = True
|
||||
iPXSettings.torrentMinDownRate = 0
|
||||
argsSplit = arg.split('==')
|
||||
temp = argsSplit[1]
|
||||
params = temp.split(';;')
|
||||
if iPXSettings.ranFromUI:
|
||||
#strip quotes off the params...
|
||||
param1 = params[0]
|
||||
param2 = params[1]
|
||||
param3 = params[2]
|
||||
param3 = param3.replace('-progName', '')
|
||||
param3 = param3.strip()
|
||||
feedsObj.retrEnclosure(param1[1:], param2, param3[:len(param3)-1])
|
||||
else:
|
||||
feedsObj.retrEnclosure(params[0], params[1], params[2])
|
||||
if re.search('-updateApps==', arg, re.IGNORECASE):
|
||||
gotFeedAlready = True
|
||||
argsSplit = arg.split('==')
|
||||
temp = argsSplit[1]
|
||||
params = temp.split(';;')
|
||||
if iPXSettings.ranFromUI:
|
||||
#strip quotes off the params...
|
||||
param1 = params[0]
|
||||
param2 = params[1]
|
||||
param3 = params[2]
|
||||
feedsObj.retrEnclosure(param1[1:], param2, param3[:len(param3)-1], True)
|
||||
else:
|
||||
feedsObj.retrEnclosure(params[0], params[1], params[2], True)
|
||||
|
||||
if not gotFeedAlready:
|
||||
feedsObj.retrFeeds()
|
||||
else:
|
||||
logIt('feeds.plist in use or empty...')
|
||||
|
||||
if not iPXSettings.ranFromUI:
|
||||
delTree(iPXSettings.tmpDownloadDir)
|
||||
|
||||
printMSG('SUBSCRIPTION_SCAN_COMPLETE')
|
||||
logIt('%s Feed Scan Completed: %s' % (iPXSettings.USER_AGENT, strftime('%H:%M:%S -- %m/%d/%Y',localtime())))
|
||||
|
||||
if __name__ == '__main__':
    # Script entry point when launched by the GUI or from a shell.
    main()
1334
iPXClass.py
Executable file
1334
iPXClass.py
Executable file
File diff suppressed because it is too large
Load Diff
662
iPXDownloader.py
Executable file
662
iPXDownloader.py
Executable file
@ -0,0 +1,662 @@
|
||||
#(c) 2004-2008 Thunderstone Media, LLC
|
||||
#Creative Commons Attribution-Noncommercial 3.0 United States License
|
||||
#
|
||||
#Python Development By:
|
||||
#
|
||||
#Ray Slakinski
|
||||
#August Trometer
|
||||
|
||||
from __future__ import division
|
||||
from time import *
|
||||
import iPXSettings
|
||||
from iPXTools import *
|
||||
|
||||
import gettext
|
||||
gettext.install('bittorrent', 'btlocale')
|
||||
|
||||
|
||||
def getTorrent(feedName, torrent, maxUploadRate, saveLocation, saveName):
|
||||
from BitTorrent.download import Feedback, Multitorrent
|
||||
from BitTorrent.defaultargs import get_defaults
|
||||
from BitTorrent.parseargs import printHelp
|
||||
from BitTorrent.zurllib import urlopen
|
||||
from BitTorrent.bencode import bdecode
|
||||
from BitTorrent.ConvertedMetainfo import ConvertedMetainfo
|
||||
from BitTorrent import configfile
|
||||
from BitTorrent import BTFailure
|
||||
from BitTorrent import version
|
||||
import re, threading
|
||||
|
||||
uiname = 'bittorrent-console'
|
||||
defaults = get_defaults(uiname)
|
||||
config, args = configfile.parse_configuration_and_args(defaults,uiname, '', 0, 1)
|
||||
|
||||
def fmtsize(n):
|
||||
return float(n)
|
||||
|
||||
class DL(Feedback):
|
||||
def __init__(self, metainfo, config):
|
||||
|
||||
|
||||
self.doneflag = threading.Event()
|
||||
self.metainfo = metainfo
|
||||
self.config = config
|
||||
|
||||
logIt('BT url: %s' % self.config['url'])
|
||||
logIt('BT save_as: %s' % self.config['save_as'])
|
||||
if self.config['max_upload_rate'] > 0:
|
||||
logIt('BT max_upload_rate: %s' % str(self.config['max_upload_rate']))
|
||||
|
||||
def run(self):
|
||||
import os
|
||||
|
||||
try:
|
||||
config = self.config
|
||||
self.d = HeadlessDisplayer(self.doneflag)
|
||||
self.multitorrent = Multitorrent(self.config, self.doneflag,
|
||||
self.global_error)
|
||||
# raises BTFailure if bad
|
||||
metainfo = ConvertedMetainfo(bdecode(self.metainfo))
|
||||
torrent_name = metainfo.name_fs
|
||||
|
||||
if config['save_as']:
|
||||
if config['save_in']:
|
||||
raise BTFailure('You cannot specify both --save_as and '
|
||||
'--save_in')
|
||||
saveas = config['save_as']
|
||||
elif config['save_in']:
|
||||
saveas = os.path.join(config['save_in'], torrent_name)
|
||||
else:
|
||||
saveas = torrent_namef
|
||||
|
||||
self.d.set_torrent_values(metainfo.name, os.path.abspath(saveas),
|
||||
metainfo.total_bytes, len(metainfo.hashes))
|
||||
self.torrent = self.multitorrent.start_torrent(metainfo,
|
||||
self.config, self, saveas)
|
||||
except BTFailure, e:
|
||||
globals()['torrentStatus'] = 0
|
||||
logIt(str(e))
|
||||
return
|
||||
self.get_status()
|
||||
self.multitorrent.rawserver.listen_forever()
|
||||
self.d.display({'activity':'shutting down', 'fractionDone':0})
|
||||
self.torrent.shutdown()
|
||||
|
||||
def reread_config(self):
|
||||
try:
|
||||
newvalues = configfile.get_config(self.config, 'btdownloadcurses')
|
||||
except Exception, e:
|
||||
globals()['torrentStatus'] = 0
|
||||
self.d.error('Error reading config: ' + str(e))
|
||||
return
|
||||
self.config.update(newvalues)
|
||||
# The set_option call can potentially trigger something that kills
|
||||
# the torrent (when writing this the only possibility is a change in
|
||||
# max_files_open causing an IOError while closing files), and so
|
||||
# the self.failed() callback can run during this loop.
|
||||
for option, value in newvalues.iteritems():
|
||||
self.multitorrent.set_option(option, value)
|
||||
for option, value in newvalues.iteritems():
|
||||
self.torrent.set_option(option, value)
|
||||
|
||||
def get_status(self):
|
||||
self.multitorrent.rawserver.add_task(self.get_status, self.config['display_interval'])
|
||||
|
||||
status = self.torrent.get_status(self.config['spew'])
|
||||
if iPXSettings.DEBUG:
|
||||
#logIt(str(status))
|
||||
#logIt(str(status['activity']))
|
||||
logIt('lastTorrentBeat: %s' % str(globals()['lastTorrentBeat']))
|
||||
|
||||
if str(status['activity']) == 'shut down':
|
||||
self.doneflag.set()
|
||||
self.d.finished()
|
||||
elif str(status['activity']) == 'seeding':
|
||||
self.d.display({'activity':_("shutting down"), 'fractionDone':0})
|
||||
self.torrent.shutdown()
|
||||
elif globals()['torrentStatus'] == 0:
|
||||
logIt(str(status))
|
||||
self.d.display({'activity':_("shutting down"), 'fractionDone':0})
|
||||
self.torrent.shutdown()
|
||||
elif globals()['lastTorrentBeat'] > iPXSettings.torrentMaxBeatTime:
|
||||
globals()['torrentStatus'] = 0
|
||||
logIt(str(status))
|
||||
logIt('Bittorrent is taking too long to download, aborting...')
|
||||
logIt('lastTorrentBeat: %s' % str(globals()['lastTorrentBeat']))
|
||||
self.d.display({'activity':_("shutting down"), 'fractionDone':0})
|
||||
self.torrent.shutdown()
|
||||
elif str(status['activity']) == 'downloading':
|
||||
globals()['torrentStatus'] = 1
|
||||
self.d.display(status)
|
||||
elif str(status['activity']) == 'Initial startup':
|
||||
globals()['torrentStatus'] = 1
|
||||
else:
|
||||
globals()['torrentStatus'] = 0
|
||||
self.d.display({'activity':_("shutting down"), 'fractionDone':0})
|
||||
self.torrent.shutdown()
|
||||
|
||||
def global_error(self, level, text):
|
||||
self.d.error(text)
|
||||
|
||||
def error(self, torrent, level, text):
|
||||
self.d.error(text)
|
||||
|
||||
def failed(self, torrent, is_external):
|
||||
self.doneflag.set()
|
||||
|
||||
def finished(self, torrent):
|
||||
self.d.finished()
|
||||
|
||||
class HeadlessDisplayer(object):
|
||||
def __init__(self, doneflag):
|
||||
self.doneflag = doneflag
|
||||
|
||||
self.done = False
|
||||
self.percentDone = ''
|
||||
self.timeEst = ''
|
||||
self.downRate = '---'
|
||||
self.upRate = '---'
|
||||
self.shareRating = ''
|
||||
self.seedStatus = ''
|
||||
self.peerStatus = ''
|
||||
self.errors = []
|
||||
self.file = ''
|
||||
self.downloadTo = ''
|
||||
self.fileSize = ''
|
||||
self.numpieces = 0
|
||||
|
||||
def set_torrent_values(self, name, path, size, numpieces):
|
||||
self.file = name
|
||||
self.downloadTo = path
|
||||
self.fileSize = fmtsize(size)
|
||||
self.numpieces = numpieces
|
||||
|
||||
def finished(self):
|
||||
self.done = True
|
||||
self.downRate = '---'
|
||||
self.display({'activity':'download succeeded', 'fractionDone':1})
|
||||
|
||||
def error(self, errormsg):
|
||||
logIt(str(errormsg))
|
||||
|
||||
def display(self, dict):
|
||||
from cStringIO import StringIO
|
||||
if dict.has_key('downRate'):
|
||||
if iPXSettings.DEBUG:
|
||||
logIt('downRate: %s' % str(dict['downRate'] / 1024))
|
||||
logIt('upRate: %s' % str(dict['upRate'] / 1024))
|
||||
logIt('lastDLSize: %s' % str(globals()['lastDLSize']))
|
||||
logIt('fractionDone: %s / %s' % (str(dict['fractionDone'] * 100), str(dict['fractionDone'])))
|
||||
logIt('numSeeds: %s' % str(dict['numSeeds']))
|
||||
logIt('numPeers: %s' % str(dict['numPeers']))
|
||||
if (long(dict['numSeeds'] > 2)) or (dict['downRate'] > iPXSettings.torrentMinDownRate and globals()['lastTorrentBeat'] <= iPXSettings.torrentMaxBeatTime):
|
||||
globals()['lastTorrentBeat'] = 0
|
||||
if globals()['lastDLSize'] < dict['fractionDone'] * 100:
|
||||
if iPXSettings.DEBUG:
|
||||
printMSG(';;1;;1;;%.2f;;%.2f;;%.2f' % (100.0, dict['fractionDone'] * 100, dict['downRate'] / 1024))
|
||||
else:
|
||||
printMSG(';;1;;1;;%.2f;;%.2f' % (100.0, dict['fractionDone'] * 100))
|
||||
globals()['lastDLSize'] = (dict['fractionDone'] * 100) + 1.0
|
||||
else:
|
||||
globals()['lastTorrentBeat'] += 1
|
||||
|
||||
def print_spew(self, spew):
|
||||
s = StringIO()
|
||||
s.write('\n\n\n')
|
||||
for c in spew:
|
||||
s.write('%20s ' % c['ip'])
|
||||
if c['initiation'] == 'L':
|
||||
s.write('l')
|
||||
else:
|
||||
s.write('r')
|
||||
total, rate, interested, choked = c['upload']
|
||||
s.write(' %10s %10s ' % (str(int(total/10485.76)/100),
|
||||
str(int(rate))))
|
||||
if c['is_optimistic_unchoke']:
|
||||
s.write('*')
|
||||
else:
|
||||
s.write(' ')
|
||||
if interested:
|
||||
s.write('i')
|
||||
else:
|
||||
s.write(' ')
|
||||
if choked:
|
||||
s.write('c')
|
||||
else:
|
||||
s.write(' ')
|
||||
|
||||
total, rate, interested, choked, snubbed = c['download']
|
||||
s.write(' %10s %10s ' % (str(int(total/10485.76)/100),
|
||||
str(int(rate))))
|
||||
if interested:
|
||||
s.write('i')
|
||||
else:
|
||||
s.write(' ')
|
||||
if choked:
|
||||
s.write('c')
|
||||
else:
|
||||
s.write(' ')
|
||||
if snubbed:
|
||||
s.write('s')
|
||||
else:
|
||||
s.write(' ')
|
||||
s.write('\n')
|
||||
print s.getvalue()
|
||||
|
||||
status = 0
|
||||
if re.search('=', saveName, re.IGNORECASE):
|
||||
saveNameSplit = saveName.split('=')
|
||||
if len(saveNameSplit[(len(saveNameSplit)-1)]) > 1:
|
||||
saveName = saveNameSplit[(len(saveNameSplit)-1)]
|
||||
|
||||
saveName = stringCleaning(saveName)
|
||||
|
||||
logIt('%s: %s [Torrent]' % (feedName, saveName))
|
||||
printMSG('%s: %s [Torrent]' % (feedName, saveName))
|
||||
|
||||
try:
|
||||
checkDir(saveLocation)
|
||||
params = ['--url', torrent, '--max_upload_rate', maxUploadRate, '--minport', iPXSettings.torrentMinPort, '--maxport', iPXSettings.torrentMinPort, '--save_as', saveLocation + '/' + saveName]
|
||||
config, args = configfile.parse_configuration_and_args(defaults,uiname, params, 0, 1)
|
||||
|
||||
if config['url']:
|
||||
h = urlopen(config['url'])
|
||||
metainfo = h.read()
|
||||
h.close()
|
||||
except Exception, msg:
|
||||
logIt('Torrent Download Failed')
|
||||
logIt('ERRORMSG: %s' % str(msg))
|
||||
status = 0
|
||||
|
||||
try:
|
||||
dl = DL(metainfo, config)
|
||||
dl.run()
|
||||
if globals()['torrentStatus'] == 1:
|
||||
logIt('Completed Download: %s' % saveName)
|
||||
status = 1
|
||||
else:
|
||||
logIt('Torrent Download Failed')
|
||||
status = 0
|
||||
except Exception, msg:
|
||||
logIt('Torrent Download Failed')
|
||||
logIt('ERRORMSG: %s' % str(msg))
|
||||
status = 0
|
||||
|
||||
return status, saveName
|
||||
|
||||
def displayProgress(block_count, block_size, total_size):
    # Progress callback (signature matches a urllib-style reporthook --
    # presumably used that way; confirm against the caller). Emits a
    # progress line only when the downloaded KB passes the lastDLSize
    # threshold, then bumps the threshold by lastDLStepSize to throttle
    # output.
    if globals()['lastDLSize'] < (float(block_count*block_size)/1024):
        printMSG(";;1;;1;;%.2f;;%.2f" % (float(total_size)/1024, float(block_count*block_size)/1024))
        globals()['lastDLSize'] = float(block_count*block_size)/1024 + iPXSettings.lastDLStepSize
||||
|
||||
def getFile(feedName, url, saveLocation, saveName, userName=None, password=''):
    # Download one enclosure over plain HTTP, either through the proxy
    # helper (getFileViaProxySSL) or directly with urllib2, honoring a
    # Content-Disposition filename when the server supplies one.
    # Returns (status, saveName); status 1 on success, 0 on failure.
    #
    # NOTE(review): this block was reconstructed from an
    # indentation-mangled dump; the placement of the Content-Disposition
    # handling and the final isfile() check relative to the proxy branch
    # should be confirmed against the original file.
    import tempfile, shutil, re, urllib2, os

    saveName = stringCleaning(saveName)
    # With credentials, install an auth-aware opener for this host.
    if not userName == None:
        import urlparse
        setOpener(urlparse.urlparse(url)[1], userName, password)
    else:
        setOpener()

    status = 1
    if iPXSettings.useProxyServer and userName != None:
        # Proxy path: first call resolves the final URL/name, second call
        # fetches to a temp file; r is the raw response for header access.
        try:
            status, url, saveName = getFileViaProxySSL(url, userName, password, True, False)
            printMSG('%s: %s' % (feedName, saveName))
            status, tmpFileName, r = getFileViaProxySSL(url, userName, password, False, True)

            if not status == 200:
                status = 0
                return 0, saveName
        except Exception, msg:
            logIt('Download Failed')
            logIt (str(msg))
            return 0, saveName

        if os.path.isfile(tmpFileName):
            # Prefer a server-provided filename from Content-Disposition.
            if not r.getheader('Content-Disposition') == None:
                if re.search('filename=', r.getheader('Content-Disposition'), re.IGNORECASE):
                    textSplit = r.getheader('Content-Disposition')
                    textSplit = textSplit.split(';')
                    for text in textSplit:
                        if re.search('filename=', text, re.IGNORECASE):
                            logIt('Detected New Filename To Use:')
                            newSaveNameSplit = text.split('=')
                            newSaveName = newSaveNameSplit[len(newSaveNameSplit) -1]
                            newSaveName = newSaveName.replace('"', '')
                            logIt(str(newSaveName))
                            shutil.copy(tmpFileName, saveLocation + '/' + newSaveName)
                            saveName = newSaveName
                elif re.search('=', saveName, re.IGNORECASE):
                    # Fall back to the name embedded after '=' in the URL.
                    saveNameSplit = saveName.split('=')
                    if len(saveNameSplit[(len(saveNameSplit)-1)]) > 1:
                        newSaveName = saveNameSplit[(len(saveNameSplit)-1)]
                        shutil.move(tmpFileName, saveLocation + '/' + newSaveName)
                        saveName = newSaveName
                    else:
                        shutil.move(tmpFileName, saveLocation + '/' + saveName)
                else:
                    shutil.move(tmpFileName, saveLocation + '/' + saveName)
    else:
        # Direct download path.
        logIt('%s: %s' % (feedName, url))
        logIt('Saving to: %s/' % saveLocation)

        checkDir(saveLocation)
        # Query-string URLs: pick the part that looks like name.ext.
        saveNameParts = saveName.split('?')
        if len(saveNameParts) > 1:
            for part in saveNameParts:
                if part.find('.') > 0:
                    if len(part[part.find('.'):])-1 == 3:
                        saveName = part
        saveName = stringCleaning(saveName)
        printMSG('%s: %s' % (feedName, saveName))

        try:
            url = url.strip()
            url = url.replace(' ', '%20')
            logIt(url)
            req = urllib2.Request(url)
            try:
                h = urllib2.urlopen(req)
            except IOError, e:
                logIt(str(e))
                return 0, saveName

            #page = ''
            count = 0
            n = 1024 # number of bytes to read at a time
            fileSize = 0
            if h.info().has_key('Content-Length'):
                fileSize = float(h.info()['Content-Length']) / 1024
            printMSG(';;1;;1;;100.00;;0.00')
            tmpFile = tempfile.mkstemp()
            tmpFileName = tmpFile[1]
            f = open(tmpFileName, 'ab')
            # Stream to the temp file, emitting a progress line each time
            # the percentage advances by at least one point.
            while True:
                a = h.read(n)
                f.write(a)
                if not a: break
                count += len(a) # len(a) may not be same as n for final read
                try:
                    # Division guarded: fileSize may be 0 (no header).
                    percentDone = ((float(count) /1024) / fileSize) * 100
                except:
                    percentDone = fileSize
                if percentDone >= globals()['lastDLSize'] + 1:
                    globals()['lastDLSize'] = percentDone
                    printMSG(';;1;;1;;100.00;;%.2f' % (percentDone))

            printMSG(';;1;;1;;100.00;;100.00')
            f.close()
            os.close(tmpFile[0])

        except Exception, msg:
            logIt('Download Failed')
            logIt (str(msg))
            status = 0

        try:
            # Same Content-Disposition filename preference as the proxy
            # branch, using the urllib2 response headers.
            if h.info().has_key('Content-Disposition'):
                if re.search('filename=', h.info()['Content-Disposition'], re.IGNORECASE):
                    textSplit = h.info()['Content-Disposition'].split(';')
                    for text in textSplit:
                        if re.search('filename=', text, re.IGNORECASE):
                            logIt('Detected New Filename To Use:')
                            newSaveNameSplit = text.split('=')
                            newSaveName = newSaveNameSplit[len(newSaveNameSplit) -1]
                            newSaveName = newSaveName.replace('"', '')
                            logIt(str(newSaveName))
                            shutil.copy(tmpFileName, saveLocation + '/' + newSaveName)
                            saveName = newSaveName
                elif re.search('=', saveName, re.IGNORECASE):
                    saveNameSplit = saveName.split('=')
                    if len(saveNameSplit[(len(saveNameSplit)-1)]) > 1:
                        newSaveName = saveNameSplit[(len(saveNameSplit)-1)]
                        shutil.move(tmpFileName, saveLocation + '/' + newSaveName)
                        saveName = newSaveName
                    else:
                        shutil.move(tmpFileName, saveLocation + '/' + saveName)
                else:
                    shutil.move(tmpFileName, saveLocation + '/' + saveName)
            else:
                shutil.move(tmpFileName, saveLocation + '/' + saveName)
        except Exception, msg:
            logIt('Filename/Move Error: %s' % str(msg))
            logIt('Retrying move with no filename detection...')
            try:
                shutil.move(tmpFileName, saveLocation + '/' + saveName)
            except Exception, msg:
                logIt('Retry Error: %s' % str(msg))

    # Final truth check: the file's presence on disk decides the status.
    if os.path.isfile(saveLocation + '/' + saveName):
        logIt('Completed Download: %s' % saveName)
        status = 1
    else:
        logIt('Download Failed')
        status = 0

    return status, saveName
|
||||
|
||||
def downloadFile(url,fileType,saveDir,saveName,folderName,customGenre,convertToAAC,makeBookmarkable,userName=None,password=''):
|
||||
import shutil, re, os, sys
|
||||
|
||||
iPXID = ''
|
||||
location = ''
|
||||
status = 1
|
||||
importProg = ''
|
||||
feedName = folderName
|
||||
globals()['lastDLSize'] = 0.0
|
||||
globals()['lastTorrentBeat'] = 0
|
||||
globals()['torrentStatus'] = 1
|
||||
|
||||
if re.search('torrent', fileType, re.IGNORECASE):
|
||||
newSaveName = saveName[0:len(saveName) - 8]
|
||||
status, newSaveName = getTorrent(feedName, url, iPXSettings.torrentMaxUpRate, iPXSettings.tmpDownloadDir, newSaveName)
|
||||
|
||||
if status == 1:
|
||||
checkDir(saveDir)
|
||||
tmpSize = os.path.getsize(iPXSettings.tmpDownloadDir + '/' + newSaveName)
|
||||
shutil.copy(iPXSettings.tmpDownloadDir + '/' + newSaveName, saveDir + '/' + newSaveName)
|
||||
os.unlink(iPXSettings.tmpDownloadDir + '/' + newSaveName)
|
||||
fileType = detectFileType(saveDir + '/' + newSaveName)
|
||||
|
||||
# Audio Files
|
||||
if re.search('audio', fileType, re.IGNORECASE):
|
||||
if (iPXSettings.moveAudio > 0):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, convertToAAC, makeBookmarkable)
|
||||
|
||||
# Image Files
|
||||
elif re.search('image', fileType, re.IGNORECASE):
|
||||
if (iPXSettings.moveImages > 0):
|
||||
importProg = 'iPhoto'
|
||||
iPXID, importProg = updateiPhoto(saveDir, newSaveName, folderName, 0)
|
||||
|
||||
# Video Files
|
||||
if (iPXSettings.moveVideo > 0):
|
||||
if sys.platform == 'darwin':
|
||||
if re.search('video/quicktime', fileType, re.IGNORECASE) or re.search('video/mpeg', fileType, re.IGNORECASE):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, False, False)
|
||||
elif sys.platform == 'win32':
|
||||
if iPXSettings.Prefs['exportApp'] == 1:
|
||||
if re.search('video', fileType, re.IGNORECASE):
|
||||
if not re.search('video/quicktime', fileType, re.IGNORECASE):
|
||||
importProg = 'WMP'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, False, False)
|
||||
else:
|
||||
if re.search('video/quicktime', fileType, re.IGNORECASE) or re.search('video/mpeg', fileType, re.IGNORECASE):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, False, False)
|
||||
|
||||
# HTML Files are bad!
|
||||
elif re.search('html', fileType, re.IGNORECASE):
|
||||
if os.path.isfile(saveDir + '/' + newSaveName):
|
||||
os.unlink(saveDir + '/' + newSaveName)
|
||||
return newSaveName, '', '', '', 0
|
||||
|
||||
elif re.search('data', fileType, re.IGNORECASE):
|
||||
if (re.search('mp3$', newSaveName, re.IGNORECASE)):
|
||||
if (iPXSettings.moveAudio > 0):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, convertToAAC, makeBookmarkable)
|
||||
elif (re.search('mov$', newSaveName, re.IGNORECASE)):
|
||||
if (iPXSettings.moveVideo > 0):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, False, False)
|
||||
elif (re.search('aa$', newSaveName,
|
||||
IGNORECASE)):
|
||||
if (iPXSettings.moveVideo > 0):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, False, False)
|
||||
|
||||
# All Other Data Types
|
||||
else:
|
||||
pass
|
||||
|
||||
saveName = newSaveName
|
||||
|
||||
if len(iPXID) > 0:
|
||||
logIt('Returning with iPXID: %s' % str(iPXID))
|
||||
return saveName, iPXID, importProg, fileType, status
|
||||
else:
|
||||
status, saveName = getFile(feedName, url, iPXSettings.tmpDownloadDir, saveName, userName, password)
|
||||
if status == 1:
|
||||
if len(fileType) <= 0 or re.search('text', fileType, re.IGNORECASE) or re.search('octet', fileType, re.IGNORECASE) or re.search('html', fileType, re.IGNORECASE) or re.search('data', fileType, re.IGNORECASE):
|
||||
fileType = detectFileType(iPXSettings.tmpDownloadDir + '/' + saveName)
|
||||
if not re.search('torrent', fileType, re.IGNORECASE):
|
||||
try:
|
||||
checkDir(saveDir)
|
||||
shutil.copy(iPXSettings.tmpDownloadDir + '/' + saveName, saveDir + '/' + saveName)
|
||||
os.unlink(iPXSettings.tmpDownloadDir + '/' + saveName)
|
||||
except Exception, msg:
|
||||
LogIt('ERRMSG: %s', msg)
|
||||
# Torrent Files
|
||||
if re.search('torrent', fileType, re.IGNORECASE):
|
||||
if os.path.isfile(saveDir + '/' + saveName):
|
||||
os.unlink(saveDir + '/' + saveName)
|
||||
# get rid of torrent extension
|
||||
if re.search('.torrent$', saveName, re.IGNORECASE):
|
||||
newSaveName = saveName[0:len(saveName) - 8]
|
||||
else:
|
||||
newSaveName = saveName
|
||||
|
||||
status, newSaveName = getTorrent(feedName, url, 0, iPXSettings.tmpDownloadDir, newSaveName)
|
||||
|
||||
if status == 1:
|
||||
checkDir(saveDir)
|
||||
shutil.copy(iPXSettings.tmpDownloadDir + '/' + newSaveName, saveDir + '/' + newSaveName)
|
||||
os.unlink(iPXSettings.tmpDownloadDir + '/' + newSaveName)
|
||||
fileType = detectFileType(saveDir + '/' + newSaveName)
|
||||
|
||||
# Audio Files
|
||||
if re.search('audio', fileType, re.IGNORECASE):
|
||||
if (iPXSettings.moveAudio > 0):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, convertToAAC, makeBookmarkable)
|
||||
|
||||
# Image Files
|
||||
elif re.search('image', fileType, re.IGNORECASE):
|
||||
if (iPXSettings.moveImages > 0):
|
||||
importProg = 'iPhoto'
|
||||
iPXID, importProg = updateiPhoto(saveDir, newSaveName, folderName, 0)
|
||||
|
||||
# Video Files
|
||||
if (iPXSettings.moveVideo > 0):
|
||||
if sys.platform == 'darwin':
|
||||
if re.search('video/quicktime', fileType, re.IGNORECASE) or re.search('video/mpeg', fileType, re.IGNORECASE):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, False, False)
|
||||
elif sys.platform == 'win32':
|
||||
if iPXSettings.Prefs['exportApp'] == 1:
|
||||
if re.search('video', fileType, re.IGNORECASE):
|
||||
if not re.search('video/quicktime', fileType, re.IGNORECASE):
|
||||
importProg = 'WMP'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, False, False)
|
||||
else:
|
||||
if re.search('video/quicktime', fileType, re.IGNORECASE) or re.search('video/mpeg', fileType, re.IGNORECASE):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, False, False)
|
||||
|
||||
# HTML Files are bad!
|
||||
elif re.search('html', fileType, re.IGNORECASE):
|
||||
if os.path.isfile(saveDir + '/' + newSaveName):
|
||||
os.unlink(saveDir + '/' + newSaveName)
|
||||
return newSaveName, '', '', '', 0
|
||||
|
||||
elif re.search('data', fileType, re.IGNORECASE):
|
||||
if (re.search('mp3$', newSaveName, re.IGNORECASE)):
|
||||
if (iPXSettings.moveAudio > 0):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, convertToAAC, makeBookmarkable)
|
||||
elif (re.search('mov$', newSaveName, re.IGNORECASE)) or (re.search('wmv$', newSaveName, re.IGNORECASE)):
|
||||
if (iPXSettings.moveVideo > 0):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, False, False)
|
||||
|
||||
elif (re.search('aa$', newSaveName, re.IGNORECASE)):
|
||||
if (iPXSettings.moveVideo > 0):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, newSaveName, folderName, customGenre, False, False)
|
||||
|
||||
# All Other Data Types
|
||||
else:
|
||||
pass
|
||||
|
||||
# Audio Files
|
||||
elif re.search('audio', fileType, re.IGNORECASE):
|
||||
if (iPXSettings.moveAudio > 0):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, saveName, folderName, customGenre, convertToAAC, makeBookmarkable)
|
||||
|
||||
# Image Files
|
||||
elif re.search('image', fileType, re.IGNORECASE):
|
||||
if (iPXSettings.moveImages > 0):
|
||||
importProg = 'iPhoto'
|
||||
iPXID, importProg = updateiPhoto(saveDir, saveName, folderName, 0)
|
||||
|
||||
# Video Files
|
||||
if (iPXSettings.moveVideo > 0):
|
||||
if sys.platform == 'darwin':
|
||||
if re.search('video/quicktime', fileType, re.IGNORECASE) or re.search('video/mpeg', fileType, re.IGNORECASE):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, saveName, folderName, customGenre, False, False)
|
||||
elif sys.platform == 'win32':
|
||||
if iPXSettings.Prefs['exportApp'] == 1:
|
||||
if re.search('video', fileType, re.IGNORECASE):
|
||||
if not re.search('video/quicktime', fileType, re.IGNORECASE):
|
||||
importProg = 'WMP'
|
||||
iPXID, importProg = updatePlaylist(saveDir, saveName, folderName, customGenre, False, False)
|
||||
else:
|
||||
if re.search('video/quicktime', fileType, re.IGNORECASE):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, saveName, folderName, customGenre, False, False)
|
||||
|
||||
# HTML Files are bad!
|
||||
elif re.search('html', fileType, re.IGNORECASE):
|
||||
if os.path.isfile(saveDir + '/' + saveName):
|
||||
os.unlink(saveDir + '/' + saveName)
|
||||
return saveName, '', '', '', 0
|
||||
|
||||
elif re.search('data', fileType, re.IGNORECASE):
|
||||
if (re.search('mp3$', saveName, re.IGNORECASE)):
|
||||
if (iPXSettings.moveAudio > 0):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, saveName, folderName, customGenre, convertToAAC, makeBookmarkable)
|
||||
elif (re.search('mov$', saveName, re.IGNORECASE)):
|
||||
if (iPXSettings.moveVideo > 0):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, saveName, folderName, customGenre, False, False)
|
||||
|
||||
elif (re.search('aa$', saveName, re.IGNORECASE)):
|
||||
if (iPXSettings.moveVideo > 0):
|
||||
importProg = 'iTunes'
|
||||
iPXID, importProg = updatePlaylist(saveDir, saveName, folderName, customGenre, False, False)
|
||||
|
||||
|
||||
# All Other Data Types
|
||||
else:
|
||||
pass
|
||||
|
||||
logIt('Returning with iPXID: %s' % str(iPXID))
|
||||
return saveName, iPXID, importProg, fileType, status
|
32
iPXFileSwap.py
Executable file
32
iPXFileSwap.py
Executable file
@ -0,0 +1,32 @@
|
||||
#(c) 2004-2008 Thunderstone Media, LLC
#Creative Commons Attribution-Noncommercial 3.0 United States License
#
#Python Development By:
#
#Ray Slakinski
#August Trometer

# Command-line helper: encrypts (-e) or decrypts (-d) its second argument
# using encrypt()/decrypt() from iPXTools.
# NOTE(review): despite the file name, nothing here swaps files - the usage
# text below admits the same; confirm the file's real intent.
# sys and re arrive via iPXTools' star import.

import iPXSettings
from iPXTools import *

# allow the caller to override the program name with '-progName==<name>'
iPXSettings.progName = 'iPodderX'
for arg in sys.argv:
    if re.search('-progName', arg, re.IGNORECASE):
        progNameSplit = arg.split('==')
        iPXSettings.progName = progNameSplit[len(progNameSplit)-1]
iPXSettings.initSettings()

def usage():
    # placeholder help text (the real usage line is commented out below)
    print '\r\nUsage: none really... It just swaps two files'
    #print '\r\nUsage:\r\n%s -d <textToDecrypt>\r\n%s -e <textToEncrypt>\r\n' % (sys.argv[0],sys.argv[0])

try:
    if sys.argv[1] == '-d':
        print decrypt(sys.argv[2].strip())
    elif sys.argv[1] == '-e':
        print encrypt(sys.argv[2].strip())
    else:
        usage()
except Exception, msg:
    # missing/invalid arguments (IndexError) land here as well
    print msg
    usage()
218
iPXQuotaManager.py
Executable file
218
iPXQuotaManager.py
Executable file
@ -0,0 +1,218 @@
|
||||
#(c) 2004-2008 Thunderstone Media, LLC
|
||||
#Creative Commons Attribution-Noncommercial 3.0 United States License
|
||||
#
|
||||
#Python Developyment By:
|
||||
#
|
||||
#Ray Slakinski
|
||||
#August Trometer
|
||||
|
||||
import iPXSettings
|
||||
from iPXTools import *
|
||||
from types import *
|
||||
|
||||
class iPXError(Exception):
    """Root of the quota-manager exception hierarchy."""
    pass

class OutOfRangeError(iPXError):
    """Raised for size values outside the accepted (non-negative) range."""
    pass

class NotIntegerError(iPXError):
    """Reserved for size values that are not whole numbers."""
    pass
||||
|
||||
def qmCacheCreate():
    # Build the SmartSpace quota cache file (qmcache.dat) from scratch by
    # asking the Feeds object to walk every feed and emit cache records.
    import iPXClass

    printMSG('Creating SmartSpace Cache File...')
    try:
        feedsObj = iPXClass.Feeds()
        feedsObj.createQMCacheBridge()
    except Exception, msg:
        # best-effort: a failed rebuild is only logged, never fatal
        logIt('Create qmcache.dat failed...')
        logIt('ERRMSG: ' + str(msg))
||||
|
||||
def writeQuotaCache(feedID, guid, enc, encSize):
    # Append one record [feedID, guid, enc, encSize] to the pickled
    # qmcache.dat list, creating the file on first use.
    # BUG FIX: the original left both file handles unclosed and duplicated
    # the dump call in each branch; load-then-append-then-dump once.
    import os, pickle

    rec = [feedID, guid, enc, encSize]
    cacheFile = iPXSettings.rssPath + 'qmcache.dat'
    cacheData = []

    if os.path.isfile(cacheFile):
        f = open(cacheFile)
        try:
            cacheData = pickle.load(f)
        finally:
            f.close()
    cacheData.append(rec)

    f = open(cacheFile, 'w')
    try:
        pickle.dump(cacheData, f)
    finally:
        f.close()
|
||||
|
||||
def remCacheItem(enc):
    # Remove every record that mentions *enc* from both the master quota
    # cache (qmcache.dat) and today's daily cache file.
    import os, pickle

    cacheFile = iPXSettings.rssPath + 'qmcache.dat'
    checkDir(iPXSettings.tempDir + '/iPXCache/')
    # one daily cache per calendar day; strftime/localtime come from the
    # star import at the top of the file
    dailyCacheFile = iPXSettings.tempDir + '/iPXCache/qmcache-' + strftime('%m%d%Y',localtime()) + '.dat'

    if os.path.isfile(cacheFile):
        cacheData = pickle.load(open(cacheFile))
        delList = []
        # cache records store paths relative to the download directory
        enc = enc.replace(iPXSettings.downloadDirectory + '/', '')
        for rec in cacheData:
            # rec layout: [feedID, guid, enc, encSize]; substring match on enc
            if enc in rec[2]:
                delList.append(rec)
                logIt('Removing qmcache.dat record: ' + str(rec))

        # delete in a second pass - never mutate a list while iterating it
        for rec in delList:
            cacheData.remove(rec)
        pickle.dump(cacheData, open(cacheFile,'w'))

    if os.path.isfile(dailyCacheFile):
        delList = []
        dailyCacheItems = pickle.load(open(dailyCacheFile))
        for item in dailyCacheItems:
            if enc in item:
                delList.append(item)

        for rec in delList:
            dailyCacheItems.remove(rec)
        pickle.dump(dailyCacheItems, open(dailyCacheFile,'w'))
|
||||
|
||||
def readQuotaCache():
    # Return the pickled record list from qmcache.dat, or [] when the cache
    # file does not exist yet.
    # BUG FIX: the original never closed the file handle it opened.
    import os, pickle

    cacheFile = iPXSettings.rssPath + 'qmcache.dat'
    cacheData = []

    if os.path.isfile(cacheFile):
        f = open(cacheFile)
        try:
            cacheData = pickle.load(f)
        finally:
            f.close()

    return cacheData
|
||||
|
||||
def getAllowedDelList():
|
||||
import os, iPXClass, pickle
|
||||
|
||||
sizeTaken = 0
|
||||
allowList = []
|
||||
tempAllowList = {}
|
||||
finalAllowList = []
|
||||
checkDir(iPXSettings.tempDir + '/iPXCache/')
|
||||
dailyCacheFie = iPXSettings.tempDir + '/iPXCache/qmcache-' + strftime('%m%d%Y',localtime()) + '.dat'
|
||||
|
||||
if not os.path.isfile(iPXSettings.rssPath + 'qmcache.dat'):
|
||||
qmCacheCreate()
|
||||
if not os.path.isfile(dailyCacheFie):
|
||||
cacheList = readQuotaCache()
|
||||
logIt('Number of items in qmcache.dat file: ' + str(len(cacheList)))
|
||||
|
||||
feedsObj = iPXClass.Feeds()
|
||||
for cacheItem in cacheList:
|
||||
sizeTaken = sizeTaken + int(cacheItem[3])
|
||||
hashFile = iPXSettings.rssPath + 'feedData/' + cacheItem[0] + '.ipxd'
|
||||
if os.path.isfile(hashFile):
|
||||
try:
|
||||
EntriesData = plistlib.Plist.fromFile(file(hashFile))
|
||||
if EntreiesData.has_key('entries'):
|
||||
for entry in EntriesData['entries']:
|
||||
if entry.guid == cacheItem[1]:
|
||||
if entry.has_key('read'):
|
||||
if entry['read'] == True:
|
||||
if entry.has_key('flagged'):
|
||||
if entry['flagged'] == False:
|
||||
allowList.append(cacheItem[2])
|
||||
else:
|
||||
allowList.append(cacheItem[2])
|
||||
else:
|
||||
allowList.append(cacheItem[2])
|
||||
except Exception, msg:
|
||||
logIt('getAllowedDelList failed...')
|
||||
logIt('ERRMSG: ' + str(msg))
|
||||
else:
|
||||
allowList.append(cacheItem[2])
|
||||
pickle.dump(allowList, open(dailyCacheFie,'w'))
|
||||
else:
|
||||
allowList = pickle.load(open(dailyCacheFie))
|
||||
|
||||
counter = 0
|
||||
for enc in allowList:
|
||||
encFile = os.path.join(iPXSettings.downloadDirectory + '/' + enc)
|
||||
if os.path.isfile(encFile):
|
||||
fileCreateTime = os.path.getmtime(encFile)
|
||||
tempAllowList[fileCreateTime + counter] = encFile
|
||||
sizeTaken = sizeTaken + os.path.getsize(encFile)
|
||||
counter = counter + 1
|
||||
else:
|
||||
remCacheItem(enc)
|
||||
|
||||
keys = tempAllowList.keys()
|
||||
keys.sort()
|
||||
for key in keys:
|
||||
finalAllowList.append(tempAllowList[key])
|
||||
|
||||
logIt('Number of items in final del allow list: ' + str(len(finalAllowList)))
|
||||
|
||||
return finalAllowList, sizeTaken
|
||||
|
||||
def getToQuota(size):
    # Free enough disk space for a pending download of *size* bytes by
    # trashing/deleting files from iPXSettings.delList (oldest first) until
    # the quota would no longer be exceeded.
    # NOTE(review): indentation reconstructed from a whitespace-mangled dump -
    # verify the loop/else nesting against the original source.
    import os, shutil, sys

    # normalise size to a number; '' / None become 0, strings are parsed
    if size == '':
        size = 0
    elif size == None:
        size = 0
    elif type(size) == str:
        try:
            size = long(size)
        except Exception, msg:
            logIt('getToQuota size error: ' + str(msg))
            logIt('Size = ' + str(size))
            size = 0
    elif size < 0:
        raise OutOfRangeError, 'number out of range (must be positive whole number)'

    if len(iPXSettings.delList) > 0:
        # pad by 50 MB of headroom when there is anything we could delete
        plusSize = 1048576 * 50
        size = long(size) + plusSize

        logIt('QM: ' + str(iPXSettings.quotaSize))
        logIt('QM: ' + str(size))
        logIt('QM: =============')
        logIt('QM: ' + str(iPXSettings.quotaSize - (iPXSettings.quotaSizeTaken + size)))

        # quota would be exceeded: delete candidates until there is room
        if (iPXSettings.quotaSize - (iPXSettings.quotaSizeTaken + size)) <= 0:
            for enc in iPXSettings.delList:
                if os.path.isfile(enc):
                    logIt('QM: Removing "' + enc + '"')
                    remCacheItem(enc)
                    iPXSettings.quotaSizeTaken = iPXSettings.quotaSizeTaken - long(os.path.getsize(enc))
                    logIt('quotaSizeTaken: ' + str(iPXSettings.quotaSizeTaken))
                    if sys.platform == 'darwin':
                        # on the Mac, move to the user's Trash instead of
                        # deleting outright
                        encNameSplit = enc.split('/')
                        encName = encNameSplit[len(encNameSplit)-1]
                        shutil.move(enc, iPXSettings.userPath + '/.Trash/' + encName)
                    elif sys.platform == 'win32':
                        os.unlink(enc)
                    # stop as soon as the pending download fits
                    if (iPXSettings.quotaSize - (iPXSettings.quotaSizeTaken + size)) > 0:
                        iPXSettings.quotaSizeTaken = iPXSettings.quotaSizeTaken + size
                        break
        else:
            logIt('QM: delList holds ' + str(len(iPXSettings.delList)) + ' files, no action taken')
|
||||
|
||||
def main():
|
||||
import sys
|
||||
|
||||
iPXSettings.initSettings()
|
||||
iPXSettings.DEBUG = 1
|
||||
iPXSettings.delList, iPXSettings.sizeTaken = getAllowedDelList()
|
||||
|
||||
size = 0
|
||||
|
||||
try:
|
||||
size = sys.argv[1]
|
||||
except:
|
||||
printMSG('Please secify a whole number ')
|
||||
sys.exit(0)
|
||||
|
||||
try:
|
||||
size = long(size)
|
||||
except Exception, msg:
|
||||
printMSG('Invalid param "' + str(size) + '", please use a whole number')
|
||||
sys.exit(0)
|
||||
|
||||
getToQuota(size)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
241
iPXSettings.py
Executable file
241
iPXSettings.py
Executable file
@ -0,0 +1,241 @@
|
||||
#(c) 2004-2008 Thunderstone Media, LLC
|
||||
#Creative Commons Attribution-Noncommercial 3.0 United States License
|
||||
#
|
||||
#Python Developyment By:
|
||||
#
|
||||
#Ray Slakinski
|
||||
#August Trometer
|
||||
|
||||
import os, sys
|
||||
from iPXTools import *
|
||||
from time import *
|
||||
|
||||
# feature flag for the 'Lite' build variant
LITE = 0

progName = 'iPodderX'

#defaults
VERSION = 'Version 3.1 Build: 35 [%s] ' % sys.platform
USER_AGENT = '<unknown>'
environs = os.environ

# per-user filesystem locations; all filled in by initSettings()
userPath = ''
#pList = ''
logPath = ''
logFile = ''
rssPath = ''
__timeBombFile = ''
defaultDir = ''
tempDir = ''
tmpDownloadDir = ''
historyFile = ''
newHistoryFile = ''
__userName = ''
pList = ''

# runtime state shared across the app's modules
feedDetails = []
totalBTFileSize = 0.0
ranFromUI = False
Prefs = []
ranQuotaTest = False
histGUIDs = []
ranHistCheck = False
feedFile = ''
FeedListPrefs = {'iPodderXFeeds':{}}
historyURLs = []
encGUIDs = []

# debug / progress-reporting knobs
DEBUG = 0
SUPERDEBUG = 0
lastDLStepSize = 5

# preference defaults, overridden from the plist by initSettings()
showExplicit = True
anonFeedback = True
onlyAudio = 0
moveAudio = 1
moveVideo = 1
moveImages = 1
deleteAudio = 0
deleteVideo = 0
deleteImages = 0
torrentFiles = 1
torrentMinDownRate = 3000
torrentMaxBeatTime = 1000
torrentMaxUpRate = 80
torrentMinPort = 6881
torrentMaxPort = 6889
maxEntryAge = 3
organizeDownloads = 0
quotaEnabled = 0
quotaSizeTaken = 0
quotaSize = 0
d = defaultDir
downloadDirectory = d

# proxy configuration defaults
proxyServer = ''
proxyPort = ''
proxyUsername = ''
proxyPassword = ''
useProxyServer = 0
useProxyIE = 0
useProxyAuth = 0
globalProxySetting = ''


# files scheduled for quota deletion (populated by iPXQuotaManager)
delList =[]

#set default
progName = 'iPodderX'
|
||||
|
||||
def initSettings():
    # Populate the module-level path/preference globals for the current
    # platform and load the user's preference plists. Must run before any
    # other module consults these settings.
    # NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    globals()['USER_AGENT'] = '%s/%s (http://slakinski.com)' % (progName, VERSION)

    globals()['userPath'] = os.path.expanduser("~")
    if sys.platform == 'darwin':
        globals()['pList'] = '%s/Library/Preferences/com.thunderstonemedia.%s.plist' % (userPath, progName)
        globals()['logPath'] = '%s/Library/Logs/' % userPath
        globals()['logFile'] = '%s%s.log' % (logPath, progName)
        globals()['rssPath'] = '%s/Library/Application Support/%s/' % (userPath, progName)
        globals()['__timeBombFile'] = '%s/Library/foo.txt' % userPath
        globals()['defaultDir'] = '%s/Documents/%s' % (userPath, progName)
        globals()['tempDir'] = '/tmp/'
        globals()['tmpDownloadDir'] = '%s/%sDownloads/' % (tempDir, progName)
        globals()['historyFile'] = '%shistory.plist' % rssPath
        globals()['newHistoryFile'] = '%shistory.dat' % rssPath
        # BUG FIX: the original indexed with len(userPath.split('\\')) here
        # (backslash) which is always 1 on darwin, so element 0 ('' for an
        # absolute path) was returned; use '/' for both split and index
        globals()['__userName'] = globals()['userPath'].split('/')[len(globals()['userPath'].split('/'))-1]
    elif sys.platform == 'win32':
        globals()['logPath'] = '%s\\Application Data\\%s\\' % (userPath, progName)
        globals()['logFile'] = '%s%s.log' % (logPath, progName)
        globals()['rssPath'] = '%s\\Application Data\\%s\\' % (userPath, progName)
        globals()['pList'] = '%siPXSettings.plist' % rssPath
        globals()['__timeBombFile'] = '%s\\System32\\foo.txt' % environs['WINDIR']
        globals()['defaultDir'] = '%s\\My Documents\\%s Downloads\\' % (userPath, progName)
        globals()['tempDir'] = '%s\\' % environs['TEMP']
        globals()['tmpDownloadDir'] = '%s\\%sDownloads\\' % (tempDir, progName)
        globals()['historyFile'] = '%s\\history.plist' % rssPath
        globals()['newHistoryFile'] = '%s\\history.dat' % rssPath
        globals()['__userName'] = globals()['userPath'].split('\\')[len(globals()['userPath'].split('\\'))-1]

    # NOTE(review): unconditionally overwrites the darwin pList assigned
    # above; matches the original flat source - confirm this is intended
    globals()['pList'] = '%siPXSettings.plist' % rssPath

    # derive the (weak) per-user crypto key: rot-13 user name, '!'-padded to 8
    globals()['__userKey'] = __userName.encode('rot-13')
    if len(__userKey) < 8:
        counter = 8 - len(__userKey)
        for x in range(counter):
            globals()['__userKey'] = '%s!' % __userKey

    globals()['__DESKey'] = '%s3sE6$!&4' % __userKey[:8]

    globals()['Prefs'] = readplist(pList)

    globals()['feedFile'] = '%sfeeds.plist' % rssPath

    # retry the feed-list read a few times (the UI may still be writing it)
    for i in range(10):
        if os.path.isfile(feedFile):
            if len(FeedListPrefs['iPodderXFeeds']) <= 0:
                globals()['FeedListPrefs'] = readplist(feedFile)
            else:
                break

    if Prefs.has_key('pyDebug'):
        globals()['DEBUG'] = int(Prefs['pyDebug'])

    if Prefs.has_key('lastDLStepSize'):
        globals()['lastDLStepSize'] = int(Prefs['lastDLStepSize'])

    # BUG FIX: original tested the misspelled key 'showExplict' but read
    # 'showExplicit', raising KeyError whenever only the typo key was present;
    # test and read the same key
    if Prefs.has_key('showExplicit'):
        globals()['showExplicit'] = Prefs['showExplicit']

    if Prefs.has_key('anonFeedback'):
        globals()['anonFeedback'] = int(Prefs['anonFeedback'])

    if Prefs.has_key('onlyAudio'):
        globals()['onlyAudio'] = int(Prefs['onlyAudio'])

    if Prefs.has_key('moveAudio'):
        globals()['moveAudio'] = int(Prefs['moveAudio'])

    if Prefs.has_key('moveVideo'):
        globals()['moveVideo'] = int(Prefs['moveVideo'])

    if Prefs.has_key('moveImages'):
        globals()['moveImages'] = int(Prefs['moveImages'])

    if Prefs.has_key('deleteAudio'):
        globals()['deleteAudio'] = int(Prefs['deleteAudio'])

    if Prefs.has_key('deleteVideo'):
        globals()['deleteVideo'] = int(Prefs['deleteVideo'])

    if Prefs.has_key('deleteImages'):
        globals()['deleteImages'] = int(Prefs['deleteImages'])

    if Prefs.has_key('torrentFiles'):
        globals()['torrentFiles'] = int(Prefs['torrentFiles'])

    if Prefs.has_key('torrentMinDownRate'):
        globals()['torrentMinDownRate'] = Prefs['torrentMinDownRate']

    # (the original repeated this exact block twice; duplicate removed)
    if Prefs.has_key('torrentMaxBeatTime'):
        globals()['torrentMaxBeatTime'] = Prefs['torrentMaxBeatTime']

    if Prefs.has_key('torrentMinPort'):
        globals()['torrentMinPort'] = Prefs['torrentMinPort']

    if Prefs.has_key('torrentMaxPort'):
        globals()['torrentMaxPort'] = Prefs['torrentMaxPort']

    if Prefs.has_key('maxEntryAge'):
        globals()['maxEntryAge'] = Prefs['maxEntryAge']

    if Prefs.has_key('organizeDownloads'):
        globals()['organizeDownloads'] = int(Prefs['organizeDownloads'])

    if Prefs.has_key('quotaEnabled'):
        globals()['quotaEnabled'] = int(Prefs['quotaEnabled'])

    if Prefs.has_key('useProxyServer'):
        globals()['useProxyServer'] = int(Prefs['useProxyServer'])

    if Prefs.has_key('useProxyIE'):
        globals()['useProxyIE'] = int(Prefs['useProxyIE'])

    if Prefs.has_key('useProxyAuth'):
        globals()['useProxyAuth'] = int(Prefs['useProxyAuth'])

    if Prefs.has_key('proxyServer'):
        globals()['proxyServer'] = Prefs['proxyServer']

    if Prefs.has_key('proxyPort'):
        globals()['proxyPort'] = Prefs['proxyPort']

    if Prefs.has_key('proxyUsername'):
        globals()['proxyUsername'] = Prefs['proxyUsername']

    if Prefs.has_key('proxyPassword'):
        globals()['proxyPassword'] = Prefs['proxyPassword']

    if Prefs.has_key('quotaSize'):
        globals()['quotaSize'] = float(Prefs['quotaSize'])
        #convert quotaSize to bytes (the pref is stored in gigabytes)
        globals()['quotaSize'] = quotaSize * 1073741824

    if Prefs.has_key('downloadDirectory'):
        globals()['d'] = Prefs['downloadDirectory']
        if (globals()['d'] == ''):
            globals()['d'] = defaultDir
        globals()['d'] = d.replace('~', userPath)
        globals()['downloadDirectory'] = globals()['d']

    globals()['delList'] =[]
|
||||
|
||||
def checkDir(dir):
    """Create directory *dir* (single level) if it does not already exist."""
    if os.path.isdir(dir):
        return
    os.mkdir(dir)
|
||||
|
||||
def getVar(var):
    """Read-only access to selected private module settings.

    Known keys: '3DESKey' and 'timeBomb'; anything else yields None.
    """
    if var == '3DESKey':
        return __DESKey
    if var == 'timeBomb':
        return __timeBombFile
    return None
|
||||
|
||||
def setVar(var):
    """Intentional no-op placeholder: private settings cannot be set."""
    return None
|
1206
iPXTools.py
Executable file
1206
iPXTools.py
Executable file
File diff suppressed because it is too large
Load Diff
75
khashmir/KRateLimiter.py
Executable file
75
khashmir/KRateLimiter.py
Executable file
@ -0,0 +1,75 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
from BitTorrent.platform import bttime as time
|
||||
from BitTorrent.CurrentRateMeasure import Measure
|
||||
from const import *
|
||||
from random import randrange, shuffle
|
||||
from traceback import print_exc
|
||||
|
||||
class KRateLimiter:
|
||||
# special rate limiter that drops entries that have been sitting in the queue for longer than self.age seconds
|
||||
# by default we toss anything that has less than 5 seconds to live
|
||||
def __init__(self, transport, rate, call_later, rlcount, rate_period, age=(KRPC_TIMEOUT - 5)):
|
||||
self.q = []
|
||||
self.transport = transport
|
||||
self.rate = rate
|
||||
self.curr = 0
|
||||
self.running = False
|
||||
self.age = age
|
||||
self.last = 0
|
||||
self.call_later = call_later
|
||||
self.rlcount = rlcount
|
||||
self.measure = Measure(rate_period)
|
||||
self.sent=self.dropped=0
|
||||
if self.rate == 0:
|
||||
self.rate = 1e10
|
||||
|
||||
def sendto(self, s, i, addr):
|
||||
self.q.append((time(), (s, i, addr)))
|
||||
if not self.running:
|
||||
self.run(check=True)
|
||||
|
||||
def run(self, check=False):
|
||||
t = time()
|
||||
self.expire(t)
|
||||
self.curr -= (t - self.last) * self.rate
|
||||
self.last = t
|
||||
if check:
|
||||
self.curr = max(self.curr, 0 - self.rate)
|
||||
|
||||
shuffle(self.q)
|
||||
while self.q and self.curr <= 0:
|
||||
x, tup = self.q.pop()
|
||||
size = len(tup[0])
|
||||
self.curr += size
|
||||
try:
|
||||
self.transport.sendto(*tup)
|
||||
self.sent+=1
|
||||
self.rlcount(size)
|
||||
self.measure.update_rate(size)
|
||||
except:
|
||||
if tup[2][1] != 0:
|
||||
print ">>> sendto exception", tup
|
||||
print_exc()
|
||||
self.q.sort()
|
||||
if self.q or self.curr > 0:
|
||||
self.running = True
|
||||
# sleep for at least a half second
|
||||
self.call_later(self.run, max(self.curr / self.rate, 0.5))
|
||||
else:
|
||||
self.running = False
|
||||
|
||||
def expire(self, t=time()):
|
||||
if self.q:
|
||||
expire_time = t - self.age
|
||||
while self.q and self.q[0][0] < expire_time:
|
||||
self.q.pop(0)
|
||||
self.dropped+=1
|
0
khashmir/__init__.py
Executable file
0
khashmir/__init__.py
Executable file
349
khashmir/actions.py
Executable file
349
khashmir/actions.py
Executable file
@ -0,0 +1,349 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
from BitTorrent.platform import bttime as time
|
||||
|
||||
import const
|
||||
|
||||
from khash import intify
|
||||
from ktable import KTable, K
|
||||
from util import unpackNodes
|
||||
from krpc import KRPCProtocolError, KRPCSelfNodeError
|
||||
from bisect import insort
|
||||
|
||||
class NodeWrap(object):
    """Wrapper that orders entries by XOR distance from a fixed target ID.

    Python 2 __cmp__ protocol; used to keep a list sorted with bisect.insort.
    """

    def __init__(self, node, target):
        self.num = target
        self.node = node

    def __cmp__(self, o):
        """Compare *o*'s distance-to-target against our own node's distance."""
        other_dist = self.num ^ o.num
        own_dist = self.num ^ self.node.num
        if other_dist > own_dist:
            return 1
        if other_dist < own_dist:
            return -1
        return 0
|
||||
|
||||
class ActionBase(object):
    """Base class for long-running asynchronous processes like finding
    nodes or values."""

    def __init__(self, table, target, callback, callLater):
        # table: the routing table; target: the ID being searched for
        self.table = table
        self.target = target
        self.callLater = callLater
        # integer form of the target for XOR-distance arithmetic
        self.num = intify(target)
        self.found = {}        # id -> node, for every node heard about
        self.foundq = []       # wrapped nodes kept sorted by distance to target
        self.queried = {}      # node ids we have already sent a query to
        self.queriedip = {}    # (host, port) pairs already queried
        self.answered = {}     # ids of nodes that replied
        self.callback = callback
        self.outstanding = 0   # queries currently in flight
        self.finished = 0

    def sort(self, a, b):
        """ this function is for sorting nodes relative to the ID we are looking for """
        x, y = self.num ^ a.num, self.num ^ b.num
        if x > y:
            return 1
        elif x < y:
            return -1
        return 0

    def shouldQuery(self, node):
        # never query ourselves; query any node not yet contacted by either
        # id or address, and mark it as contacted as a side effect
        if node.id == self.table.node.id:
            return False
        elif (node.host, node.port) not in self.queriedip and node.id not in self.queried:
            self.queriedip[(node.host, node.port)] = 1
            self.queried[node.id] = 1
            return True
        return False

    def _cleanup(self):
        # drop the bookkeeping containers so a finished action frees memory
        self.foundq = None
        self.found = None
        self.queried = None
        self.queriedip = None

    def goWithNodes(self, t):
        # hook for subclasses; invoked when the search should proceed
        pass
|
||||
|
||||
|
||||
|
||||
FIND_NODE_TIMEOUT = 15  # seconds; currently unused (see commented-out timeout below)

class FindNode(ActionBase):
    """ find node action merits it's own class as it is a long running stateful process """

    def handleGotNodes(self, dict):
        # Response callback for an outstanding findNode KRPC; `dict` carries
        # the raw response plus the sender's address under '_krpc_sender'.
        _krpc_sender = dict['_krpc_sender']
        dict = dict['rsp']
        sender = {'id' : dict["id"]}
        sender['port'] = _krpc_sender[1]
        sender['host'] = _krpc_sender[0]
        sender = self.table.Node().initWithDict(sender)
        try:
            # compact node list -> list of node dicts; a malformed payload
            # invalidates the sender instead of crashing the search
            l = unpackNodes(dict.get("nodes", []))
            if not self.answered.has_key(sender.id):
                self.answered[sender.id] = sender
        except:
            l = []
            self.table.invalidateNode(sender)

        if self.finished:
            # a day late and a dollar short
            return
        self.outstanding = self.outstanding - 1
        for node in l:
            n = self.table.Node().initWithDict(node)
            if not self.found.has_key(n.id):
                # record the new node, keep foundq distance-ordered, and seed
                # the routing table (contacted=0: we haven't talked to it yet)
                self.found[n.id] = n
                insort(self.foundq, NodeWrap(n, self.num))
                self.table.insertNode(n, contacted=0)
        self.schedule()

    def schedule(self):
        """
        send messages to new peers, if necessary
        """
        if self.finished:
            return
        # work on the K closest nodes found so far
        l = [wrapper.node for wrapper in self.foundq[:K]]
        for node in l:
            if node.id == self.target:
                # exact match ends the search immediately
                self.finished=1
                return self.callback([node])
            if self.shouldQuery(node):
                #xxxx t.timeout = time.time() + FIND_NODE_TIMEOUT
                try:
                    df = node.findNode(self.target, self.table.node.id)
                except KRPCSelfNodeError:
                    # querying ourselves is a no-op
                    pass
                else:
                    df.addCallbacks(self.handleGotNodes, self.makeMsgFailed(node))
                    self.outstanding = self.outstanding + 1
            if self.outstanding >= const.CONCURRENT_REQS:
                # cap the number of in-flight requests
                break
        assert(self.outstanding) >=0
        if self.outstanding == 0:
            ## all done!!
            self.finished=1
            self._cleanup()
            # deliver the K closest nodes asynchronously
            self.callLater(self.callback, 0, (l[:K],))

    def makeMsgFailed(self, node):
        # errback factory; `node` is currently unused by the handler
        return self._defaultGotNodes

    def _defaultGotNodes(self, err):
        # a query failed or timed out: just account for it and keep going
        self.outstanding = self.outstanding - 1
        self.schedule()

    def goWithNodes(self, nodes):
        """
        this starts the process, our argument is a transaction with t.extras being our list of nodes
        it's a transaction since we got called from the dispatcher
        """
        for node in nodes:
            if node.id == self.table.node.id:
                # never include ourselves in the search set
                continue
            else:
                self.found[node.id] = node
                insort(self.foundq, NodeWrap(node, self.num))
        self.schedule()
|
||||
|
||||
|
||||
get_value_timeout = 15  # seconds; currently unused (see commented-out timeout below)

class GetValue(FindNode):
    # Iterative value lookup: like FindNode, but a response may carry either
    # closer nodes or the values themselves.
    def __init__(self, table, target, callback, callLater, find="findValue"):
        FindNode.__init__(self, table, target, callback, callLater)
        # name of the remote method to invoke (overridable, e.g. "getPeers")
        self.findValue = find

    """ get value task """
    def handleGotNodes(self, dict):
        # Response callback: may contain 'nodes' (keep searching) or 'values'.
        _krpc_sender = dict['_krpc_sender']
        dict = dict['rsp']
        sender = {'id' : dict["id"]}
        sender['port'] = _krpc_sender[1]
        sender['host'] = _krpc_sender[0]
        sender = self.table.Node().initWithDict(sender)

        if self.finished or self.answered.has_key(sender.id):
            # a day late and a dollar short
            return
        self.outstanding = self.outstanding - 1

        self.answered[sender.id] = sender
        # go through nodes
        # if we have any closer than what we already got, query them
        if dict.has_key('nodes'):
            try:
                l = unpackNodes(dict.get('nodes',[]))
            except:
                # malformed payload: forget we counted this peer as answered
                l = []
                del(self.answered[sender.id])

            for node in l:
                n = self.table.Node().initWithDict(node)
                if not self.found.has_key(n.id):
                    self.table.insertNode(n)
                    self.found[n.id] = n
                    insort(self.foundq, NodeWrap(n, self.num))
        elif dict.has_key('values'):
            # de-duplicate against self.results (bound as default arg `z`)
            def x(y, z=self.results):
                if not z.has_key(y):
                    z[y] = 1
                    return y
                else:
                    return None
            # NOTE(review): this local `z` is never read afterwards — dead code
            z = len(dict.get('values', []))
            v = filter(None, map(x, dict.get('values',[])))
            if(len(v)):
                # stream newly seen values to the caller as they arrive
                self.callLater(self.callback, 0, (v,))
        self.schedule()

    ## get value
    def schedule(self):
        # Issue queries to the closest un-queried nodes, bounded by
        # const.CONCURRENT_REQS in-flight requests.
        if self.finished:
            return
        for node in [wrapper.node for wrapper in self.foundq[:K]]:
            if self.shouldQuery(node):
                #xxx t.timeout = time.time() + GET_VALUE_TIMEOUT
                try:
                    f = getattr(node, self.findValue)
                except AttributeError:
                    print ">>> findValue %s doesn't have a %s method!" % (node, self.findValue)
                else:
                    try:
                        df = f(self.target, self.table.node.id)
                        df.addCallback(self.handleGotNodes)
                        df.addErrback(self.makeMsgFailed(node))
                        self.outstanding = self.outstanding + 1
                        # redundant with shouldQuery's bookkeeping, but harmless
                        self.queried[node.id] = 1
                    except KRPCSelfNodeError:
                        pass
            if self.outstanding >= const.CONCURRENT_REQS:
                break
        assert(self.outstanding) >=0
        if self.outstanding == 0:
            ## all done, didn't find it!!
            self.finished=1
            self._cleanup()
            # report an empty result set asynchronously
            self.callLater(self.callback,0, ([],))

    ## get value
    def goWithNodes(self, nodes, found=None):
        # `found`: values already known locally; pre-seed the de-dup set so
        # they are not reported again by remote responses.
        self.results = {}
        if found:
            for n in found:
                self.results[n] = 1
        for node in nodes:
            if node.id == self.table.node.id:
                continue
            else:
                self.found[node.id] = node
                insort(self.foundq, NodeWrap(node, self.num))
        self.schedule()
|
||||
|
||||
|
||||
class StoreValue(ActionBase):
    # Pushes a value onto the STORE_REDUNDANCY closest nodes.
    def __init__(self, table, target, value, callback, callLater, store="storeValue"):
        ActionBase.__init__(self, table, target, callback, callLater)
        self.value = value
        # successful store acknowledgements collected so far
        self.stored = []
        # name of the remote method to invoke (overridable, e.g. "announcePeer")
        self.store = store

    def storedValue(self, t, node):
        # Callback for one successful remote store.
        self.outstanding -= 1
        if self.finished:
            return
        self.stored.append(t)
        if len(self.stored) >= const.STORE_REDUNDANCY:
            # enough replicas: report success
            self.finished=1
            self.callback(self.stored)
        else:
            # only issue more requests if in-flight ones can't reach the quota
            if not len(self.stored) + self.outstanding >= const.STORE_REDUNDANCY:
                self.schedule()
        return t

    def storeFailed(self, t, node):
        # Errback for a failed remote store: account for it and try more nodes.
        self.outstanding -= 1
        if self.finished:
            return t
        self.schedule()
        return t

    def schedule(self):
        # Launch up to CONCURRENT_REQS stores, but never more than are still
        # needed to reach STORE_REDUNDANCY replicas.
        if self.finished:
            return
        num = const.CONCURRENT_REQS - self.outstanding
        if num > const.STORE_REDUNDANCY - len(self.stored):
            num = const.STORE_REDUNDANCY - len(self.stored)
        if num == 0 and not self.finished:
            self.finished=1
            self.callback(self.stored)
        while num > 0:
            try:
                # nodes were sorted by distance in goWithNodes; pop() takes
                # from the far end of that ordering
                node = self.nodes.pop()
            except IndexError:
                # ran out of candidates; finish once in-flight requests drain
                if self.outstanding == 0:
                    self.finished = 1
                    self._cleanup()
                    self.callback(self.stored)
                return
            else:
                if not node.id == self.table.node.id:
                    try:
                        f = getattr(node, self.store)
                    except AttributeError:
                        print ">>> %s doesn't have a %s method!" % (node, self.store)
                    else:
                        try:
                            df = f(self.target, self.value, self.table.node.id)
                        except KRPCProtocolError:
                            # NOTE(review): `self.table.table` — looks like it
                            # relies on the owner exposing a nested table;
                            # confirm against the caller
                            self.table.table.invalidateNode(node)
                        except KRPCSelfNodeError:
                            pass
                        else:
                            # extra args/kwargs are forwarded to the handler
                            df.addCallback(self.storedValue,(),{'node':node})
                            df.addErrback(self.storeFailed, (), {'node':node})
                            self.outstanding += 1
                            num -= 1

    def goWithNodes(self, nodes):
        # Start the store: order candidates by XOR distance to the target.
        self.nodes = nodes
        self.nodes.sort(self.sort)
        self.schedule()
|
||||
|
||||
|
||||
class GetAndStore(GetValue):
    """Look a value up and, when the lookup comes back empty, store our own."""

    def __init__(self, table, target, value, callback, storecallback, callLater, find="findValue", store="storeValue"):
        self.store = store
        self.value = value
        self.cb2 = callback
        self.storecallback = storecallback

        def relay(res):
            # forward lookup results; an empty final result triggers a store
            # pass against every node that answered the search
            self.cb2(res)
            if not res:
                storer = StoreValue(self.table, self.target, self.value,
                                    self.doneStored, self.callLater, self.store)
                storer.goWithNodes(self.answered.values())

        GetValue.__init__(self, table, target, relay, callLater, find)

    def doneStored(self, dict):
        # hand the store result to the caller-supplied store callback
        self.storecallback(dict)
|
||||
|
||||
class KeyExpirer:
    """Self-rescheduling task that drops aged-out entries from a key store."""

    def __init__(self, store, callLater):
        self.store = store
        self.callLater = callLater
        # first sweep runs after the initial delay; doExpire reschedules itself
        self.callLater(self.doExpire, const.KEINITIAL_DELAY)

    def doExpire(self):
        # everything older than KE_AGE seconds gets expired
        cutoff = time() - const.KE_AGE
        self.cut = cutoff
        self.store.expire(cutoff)
        self.callLater(self.doExpire, const.KE_DELAY)
|
52
khashmir/cache.py
Executable file
52
khashmir/cache.py
Executable file
@ -0,0 +1,52 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
from BitTorrent.platform import bttime as time
|
||||
|
||||
class Cache:
    """Timestamped key/value cache whose entries can be expired by age.

    data maps key -> (insert time, value); q is a newest-first log of
    (time, key, value) records that expire() consumes from its tail.
    """

    def __init__(self, touch_on_access = False):
        self.data = {}
        self.q = []
        # when set, every read refreshes the entry's timestamp
        self.touch = touch_on_access

    def __getitem__(self, key):
        stamp, value = self.data[key]
        if self.touch:
            # re-insert so the entry looks freshly written
            self[key] = value
        return self.data[key][1]

    def __setitem__(self, key, value):
        now = time()
        self.data[key] = (now, value)
        self.q.insert(0, (now, key, value))

    def __delitem__(self, key):
        # stale q records for this key are skipped later by expire()
        del(self.data[key])

    def has_key(self, key):
        return self.data.has_key(key)

    def keys(self):
        return self.data.keys()

    def expire(self, expire_time):
        # pop aged records off the tail of the log; only delete a key when the
        # logged record still matches what is live in data (i.e. the entry was
        # not overwritten or refreshed since)
        while self.q and self.q[-1][0] < expire_time:
            stamp, key, value = self.q.pop()
            try:
                live_stamp, live_value = self.data[key]
            except KeyError:
                continue
            if live_value == value and live_stamp == stamp:
                del(self.data[key])
|
||||
|
65
khashmir/const.py
Executable file
65
khashmir/const.py
Executable file
@ -0,0 +1,65 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# magic id to use before we know a peer's id
NULL_ID = 20 * '\0'

# Kademlia "K" constant, this should be an even number
K = 8

# SHA1 is 160 bits long
HASH_LENGTH = 160

# checkpoint every this many seconds
CHECKPOINT_INTERVAL = 60 * 5 # five minutes

# how often to find our own nodes
FIND_CLOSE_INTERVAL = 60 * 15 # fifteen minutes

### SEARCHING/STORING
# concurrent krpc calls per find node/value request!
CONCURRENT_REQS = K

# how many hosts to post to when storing a value
STORE_REDUNDANCY = 3


### ROUTING TABLE STUFF
# how many times in a row a node can fail to respond before it's booted from the routing table
MAX_FAILURES = 3

# never ping a node more often than this
MIN_PING_INTERVAL = 60 * 15 # fifteen minutes

# refresh buckets that haven't been touched in this long
BUCKET_STALENESS = 60 * 15 # fifteen minutes


### KEY EXPIRER
# time before expirer starts running
KEINITIAL_DELAY = 15 # 15 seconds - to clean out old stuff in persistent db

# time between expirer runs
KE_DELAY = 60 * 5 # 5 minutes

# expire entries older than this
KE_AGE = 60 * 30 # 30 minutes


## krpc
# seconds before an outstanding KRPC call is considered timed out
KRPC_TIMEOUT = 20

# KRPC error codes
KRPC_ERROR = 1
KRPC_ERROR_METHOD_UNKNOWN = 2
KRPC_ERROR_RECEIVED_UNKNOWN = 3
KRPC_ERROR_TIMEOUT = 4
KRPC_SOCKET_ERROR = 5

# how long connection objects are cached per address
KRPC_CONNECTION_CACHE_TIME = KRPC_TIMEOUT * 2
|
37
khashmir/hammerlock.py
Executable file
37
khashmir/hammerlock.py
Executable file
@ -0,0 +1,37 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
INTERVAL = 60   # seconds per bucket rotation
PERIODS = 5     # number of rotating buckets in the window

class Hammerlock:
    """Sliding-window rate limiter: allows fewer than `rate` hits per address
    across the last PERIODS * INTERVAL seconds."""

    def __init__(self, rate, call_later):
        self.rate = rate
        self.call_later = call_later
        self.curr = 0
        self.buckets = [{} for _ in range(PERIODS)]
        self.call_later(self._cycle, INTERVAL)

    def _cycle(self):
        # advance to the next bucket, discarding its old counts, and reschedule
        self.curr = (self.curr + 1) % PERIODS
        self.buckets[self.curr] = {}
        self.call_later(self._cycle, INTERVAL)

    def check(self, addr):
        """Record a hit for addr; True while it stays under the rate limit."""
        current = self.buckets[self.curr]
        current[addr] = current.get(addr, 0) + 1
        total = sum(bucket.get(addr, 0) for bucket in self.buckets)
        return total < self.rate
|
||||
|
49
khashmir/inserter.py
Executable file
49
khashmir/inserter.py
Executable file
@ -0,0 +1,49 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
### generate a bunch of nodes that use a single contact point
|
||||
usage = "usage: inserter.py <contact host> <contact port>"
|
||||
|
||||
from utkhashmir import UTKhashmir
|
||||
from BitTorrent.RawServer_magic import RawServer
|
||||
from BitTorrent.defaultargs import common_options, rare_options
|
||||
from khashmir.khash import newID
|
||||
from random import randrange
|
||||
from threading import Event
|
||||
import sys, os
|
||||
|
||||
from khashmir.krpc import KRPC
|
||||
KRPC.noisy = 1  # turn on verbose KRPC logging for this tool
# NOTE(review): `global` at module level is a no-op; kept as written
global done
done = 0  # completion counter bumped by the DHT operation callback below

def d(n):
    # completion callback: each finished DHT operation bumps the counter,
    # which the main loop polls; the result argument `n` is ignored
    global done
    done = done+1
|
||||
|
||||
if __name__=="__main__":
    # NOTE(review): module-level `global` is a no-op; kept as written
    global done
    host, port = sys.argv[1:]
    # local node on port 22038 with a scratch data directory
    x = UTKhashmir("", 22038, "/tmp/cgcgcgc")
    x.addContact(host, int(port))
    x.rawserver.listen_once()
    # bootstrap: wait until the close-nodes search completes (d bumps `done`)
    x.findCloseNodes(d)
    while not done:
        x.rawserver.listen_once()
    # announce 10 random key/port pairs, then wait for the callbacks
    l = []
    for i in range(10):
        k = newID()
        v = randrange(10000,20000)
        l.append((k, v))
        x.announcePeer(k, v, d)
    # reset the counter; each announce completion pushes it toward 10
    done = 1
    while done < 10:
        x.rawserver.listen_once(1)
    for k,v in l:
        print ">>>", `k`, v
|
120
khashmir/khash.py
Executable file
120
khashmir/khash.py
Executable file
@ -0,0 +1,120 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
from sha import sha
|
||||
from random import randint
|
||||
|
||||
#this is ugly, hopefully os.entropy will be in 2.4
try:
    from entropy import entropy
except ImportError:
    def entropy(n):
        """Fallback: n pseudo-random bytes from the stdlib PRNG (not crypto-grade)."""
        return ''.join([chr(randint(0, 255)) for _ in range(n)])
|
||||
|
||||
def intify(hstr):
    """20-byte (160-bit) hash, big-endian -> long python integer"""
    assert len(hstr) == 20
    # hex-encode then parse base 16 (Python 2 'hex' codec)
    return long(hstr.encode('hex'), 16)
|
||||
|
||||
def stringify(num):
    """long int -> 20-character big-endian string (inverse of intify)"""
    # NOTE(review): the local `str` shadows the builtin within this function
    str = hex(num)[2:]
    if str[-1] == 'L':
        # Python 2 longs carry a trailing 'L' in their hex() form
        str = str[:-1]
    if len(str) % 2 != 0:
        # the 'hex' codec needs an even number of digits
        str = '0' + str
    str = str.decode('hex')
    # left-pad with NUL bytes back up to 20 characters
    return (20 - len(str)) *'\x00' + str
|
||||
|
||||
def distance(a, b):
    """XOR distance between two 160-bit hashes expressed as 20-character strings"""
    return intify(a) ^ intify(b)
|
||||
|
||||
|
||||
def newID():
    """returns a new pseudorandom globally unique ID string"""
    # SHA-1 of 20 random bytes -> a 20-byte digest
    h = sha()
    h.update(entropy(20))
    return h.digest()
|
||||
|
||||
def newIDInRange(min, max):
    # NOTE(review): `min`/`max` shadow the builtins within these helpers
    """Return a 20-byte ID string whose integer value lies in [min, max)."""
    return stringify(randRange(min,max))

def randRange(min, max):
    """Pseudorandom integer in [min, max), derived from a fresh 160-bit ID."""
    return min + intify(newID()) % (max - min)

def newTID():
    """Pseudorandom transaction ID in the signed 31-bit range."""
    return randRange(-2**30, 2**30)
|
||||
|
||||
### Test Cases ###
|
||||
import unittest
|
||||
|
||||
class NewID(unittest.TestCase):
    """Sanity checks for newID()."""
    def testLength(self):
        self.assertEqual(len(newID()), 20)
    def testHundreds(self):
        # repeat the length check many times
        for x in xrange(100):
            # BUGFIX: was `self.testLength` — a bare attribute access, so the
            # loop never actually ran the check
            self.testLength()
|
||||
|
||||
class Intify(unittest.TestCase):
    # (input string, expected integer) pairs covering both extremes
    known = [('\0' * 20, 0),
             ('\xff' * 20, 2L**160 - 1),
             ]
    def testKnown(self):
        for str, value in self.known:
            self.assertEqual(intify(str), value)
    def testEndianessOnce(self):
        # incrementing the LAST byte must increment the integer by 1
        # (i.e. the encoding is big-endian); skip IDs ending in 0xff
        h = newID()
        while h[-1] == '\xff':
            h = newID()
        k = h[:-1] + chr(ord(h[-1]) + 1)
        self.assertEqual(intify(k) - intify(h), 1)
    def testEndianessLots(self):
        for x in xrange(100):
            self.testEndianessOnce()
|
||||
|
||||
class Disantance(unittest.TestCase):  # (sic) name is a typo for "Distance"
    # ((a, b), expected XOR distance) pairs
    known = [
            (("\0" * 20, "\xff" * 20), 2**160L -1),
            ((sha("foo").digest(), sha("foo").digest()), 0),
            ((sha("bar").digest(), sha("bar").digest()), 0)
            ]
    def testKnown(self):
        for pair, dist in self.known:
            self.assertEqual(distance(pair[0], pair[1]), dist)
    def testCommutitive(self):
        # XOR metric identity: d(x,y) ^ d(y,z) == d(x,z)
        for i in xrange(100):
            x, y, z = newID(), newID(), newID()
            self.assertEqual(distance(x,y) ^ distance(y, z), distance(x, z))
|
||||
|
||||
class RandRange(unittest.TestCase):
    def testOnce(self):
        # draw two random IDs and check randRange lands in [low, high)
        a = intify(newID())
        b = intify(newID())
        if a < b:
            c = randRange(a, b)
            self.assertEqual(a <= c < b, 1, "output out of range %d %d %d" % (b, c, a))
        else:
            c = randRange(b, a)
            assert b <= c < a, "output out of range %d %d %d" % (b, c, a)

    def testOneHundredTimes(self):
        for i in xrange(100):
            self.testOnce()
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # run the embedded unit tests when this module is invoked directly
    unittest.main()
|
||||
|
||||
|
442
khashmir/khashmir.py
Executable file
442
khashmir/khashmir.py
Executable file
@ -0,0 +1,442 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
import const
|
||||
from socket import gethostbyname
|
||||
|
||||
from BitTorrent.platform import bttime as time
|
||||
|
||||
from sha import sha
|
||||
import re
|
||||
from BitTorrent.defaultargs import common_options, rare_options
|
||||
from BitTorrent.RawServer_magic import RawServer
|
||||
|
||||
from ktable import KTable, K
|
||||
from knode import *
|
||||
from kstore import KStore
|
||||
from khash import newID, newIDInRange
|
||||
|
||||
from util import packNodes
|
||||
from actions import FindNode, GetValue, KeyExpirer, StoreValue
|
||||
import krpc
|
||||
|
||||
import sys
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from BitTorrent.bencode import bencode, bdecode
|
||||
|
||||
from BitTorrent.defer import Deferred
|
||||
from random import randrange
|
||||
from kstore import sample
|
||||
|
||||
from threading import Event, Thread
|
||||
|
||||
# dotted-quad matcher used to decide whether addContact got an IP or a
# hostname.  NOTE(review): not anchored and accepts octets > 255.
ip_pat = re.compile('[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')

class KhashmirDBExcept(Exception):
    # raised for routing-table / database problems
    pass

def foo(bytes):
    # default no-op rate-limit-count callback (`bytes` shadows the builtin)
    pass
|
||||
|
||||
# this is the base class, has base functionality and find node, no key-value mappings
class KhashmirBase:
    # node class to instantiate; subclasses override with richer node types
    _Node = KNodeBase

    def __init__(self, host, port, data_dir, rawserver=None, max_ul_rate=1024, checkpoint=True, errfunc=None, rlcount=foo, config={'pause':False, 'max_rate_period':20}):
        # NOTE(review): the mutable default `config` dict is shared across
        # instances that don't pass their own — confirm callers always do
        if rawserver:
            self.rawserver = rawserver
        else:
            # stand-alone mode: build our own event loop
            self.flag = Event()
            d = dict([(x[0],x[1]) for x in common_options + rare_options])
            self.rawserver = RawServer(self.flag, d)
        self.max_ul_rate = max_ul_rate
        self.socket = None
        self.config = config
        self.setup(host, port, data_dir, rlcount, checkpoint)

    def setup(self, host, port, data_dir, rlcount, checkpoint=True):
        # Bind the UDP socket, wire up KRPC, load any persisted routing table,
        # and schedule the periodic maintenance tasks.
        self.host = host
        self.port = port
        self.ddir = data_dir
        self.store = KStore()
        # nodes queued behind an in-flight ping, keyed by the old node's id
        self.pingcache = {}
        self.socket = self.rawserver.create_udpsocket(self.port, self.host, False)
        self.udp = krpc.hostbroker(self, (self.host, self.port), self.socket, self.rawserver.add_task, self.max_ul_rate, self.config, rlcount)
        self._load()
        self.rawserver.start_listening_udp(self.socket, self.udp)
        self.last = time()
        # KeyExpirer schedules itself; no reference needs keeping
        KeyExpirer(self.store, self.rawserver.add_task)
        self.refreshTable(force=1)
        if checkpoint:
            self.rawserver.add_task(self.findCloseNodes, 30, (lambda a: a, True))
            self.rawserver.add_task(self.checkpoint, 60, (1,))

    def Node(self):
        # Factory for node objects bound to our KRPC connection broker.
        n = self._Node(self.udp.connectionForAddr)
        n.table = self
        return n

    def __del__(self):
        # best-effort socket teardown
        if self.socket is not None:
            self.rawserver.stop_listening_udp(self.socket)
            self.socket.close()

    def _load(self):
        # Restore our node id and routing table from disk, or start fresh.
        do_load = False
        try:
            s = open(os.path.join(self.ddir, "routing_table"), 'r').read()
            dict = bdecode(s)
        except:
            id = newID()
        else:
            id = dict['id']
            do_load = True

        self.node = self._Node(self.udp.connectionForAddr).init(id, self.host, self.port)
        self.table = KTable(self.node)
        if do_load:
            self._loadRoutingTable(dict['rt'])

    def checkpoint(self, auto=0):
        # Persist our id and routing table; auto=1 reschedules with jitter.
        d = {}
        d['id'] = self.node.id
        d['rt'] = self._dumpRoutingTable()
        try:
            f = open(os.path.join(self.ddir, "routing_table"), 'wb')
            f.write(bencode(d))
            f.close()
        except:
            #XXX real error here
            # BUG: `e` is never bound by this bare except, so this print
            # raises NameError when the write fails
            print ">>> unable to dump routing table!", str(e)
            pass

        if auto:
            self.rawserver.add_task(self.checkpoint,
                                    randrange(int(const.CHECKPOINT_INTERVAL * .9),
                                              int(const.CHECKPOINT_INTERVAL * 1.1)),
                                    (1,))

    def _loadRoutingTable(self, nodes):
        """
        load routing table nodes from database
        it's usually a good idea to call refreshTable(force=1) after loading the table
        """
        for rec in nodes:
            n = self.Node().initWithDict(rec)
            self.table.insertNode(n, contacted=0, nocheck=True)

    def _dumpRoutingTable(self):
        """
        save routing table nodes to the database
        """
        l = []
        for bucket in self.table.buckets:
            for node in bucket.l:
                l.append({'id':node.id, 'host':node.host, 'port':node.port, 'age':int(node.age)})
        return l

    def _addContact(self, host, port, callback=None):
        """
        ping this node and add the contact info to the table on pong!
        """
        n =self.Node().init(const.NULL_ID, host, port)
        try:
            self.sendPing(n, callback=callback)
        except krpc.KRPCSelfNodeError:
            # our own node
            pass

    #######
    ####### LOCAL INTERFACE - use these methods!
    def addContact(self, ip, port, callback=None):
        """
        ping this node and add the contact info to the table on pong!
        """
        if ip_pat.match(ip):
            self._addContact(ip, port)
        else:
            # hostname: resolve on a worker thread, then hop back to the
            # rawserver thread to do the actual contact
            def go(ip=ip, port=port):
                ip = gethostbyname(ip)
                self.rawserver.external_add_task(self._addContact, 0, (ip, port))
            t = Thread(target=go)
            t.start()

    ## this call is async!
    def findNode(self, id, callback, errback=None):
        """ returns the contact info for node, or the k closest nodes, from the global table """
        # get K nodes out of local table/cache, or the node we want
        nodes = self.table.findNodes(id, invalid=True)
        l = [x for x in nodes if x.invalid]
        if len(l) > 4:
            # mix a few invalid (unverified) nodes with known-good ones
            nodes = sample(l , 4) + self.table.findNodes(id, invalid=False)[:4]

        d = Deferred()
        if errback:
            d.addCallbacks(callback, errback)
        else:
            d.addCallback(callback)
        if len(nodes) == 1 and nodes[0].id == id :
            # exact hit in the local table; no network search needed
            d.callback(nodes)
        else:
            # create our search state
            state = FindNode(self, id, d.callback, self.rawserver.add_task)
            self.rawserver.external_add_task(state.goWithNodes, 0, (nodes,))

    def insertNode(self, n, contacted=1):
        """
        insert a node in our local table, pinging oldest contact in bucket, if necessary

        If all you have is a host/port, then use addContact, which calls this method after
        receiving the PONG from the remote node. The reason for the seperation is we can't insert
        a node into the table without it's peer-ID. That means of course the node passed into this
        method needs to be a properly formed Node object with a valid ID.
        """
        old = self.table.insertNode(n, contacted=contacted)
        if old and old != n:
            if not old.inPing():
                self.checkOldNode(old, n, contacted)
            else:
                # the old node is already being pinged: queue this candidate
                # (bounded at 10, but contacted nodes always get queued)
                l = self.pingcache.get(old.id, [])
                if len(l) < 10 or contacted:
                    l.append((n, contacted))
                    self.pingcache[old.id] = l

    def checkOldNode(self, old, new, contacted=False):
        ## these are the callbacks used when we ping the oldest node in a bucket

        # order queued candidates so contacted ones come first
        def cmp(a, b):
            if a[1] == 1 and b[1] == 0:
                return -1
            elif b[1] == 1 and a[1] == 0:
                return 1
            else:
                return 0

        def _staleNodeHandler(dict, old=old, new=new, contacted=contacted):
            """ called if the pinged node never responds """
            if old.fails >= 2:
                # old node is considered dead: replace it, preferring the
                # best queued candidate over the incoming one if appropriate
                l = self.pingcache.get(old.id, [])
                l.sort(cmp)
                if l:
                    n, nc = l[0]
                    if (not contacted) and nc:
                        l = l[1:] + [(new, contacted)]
                        new = n
                        contacted = nc
                o = self.table.replaceStaleNode(old, new)
                if o and o != new:
                    # replacement itself displaced another node: recurse,
                    # carrying the ping queue over to the displaced node
                    self.checkOldNode(o, new)
                    try:
                        self.pingcache[o.id] = self.pingcache[old.id]
                        del(self.pingcache[old.id])
                    except KeyError:
                        pass
                else:
                    # drain the queue back into the table
                    if l:
                        del(self.pingcache[old.id])
                        l.sort(cmp)
                        for node in l:
                            self.insertNode(node[0], node[1])
            else:
                # not failed enough yet: keep old, re-offer the candidates
                l = self.pingcache.get(old.id, [])
                if l:
                    del(self.pingcache[old.id])
                self.insertNode(new, contacted)
                for node in l:
                    self.insertNode(node[0], node[1])

        def _notStaleNodeHandler(dict, old=old, new=new, contacted=contacted):
            """ called when we get a pong from the old node """
            self.table.insertNode(old, True)
            self.insertNode(new, contacted)
            l = self.pingcache.get(old.id, [])
            l.sort(cmp)
            for node in l:
                self.insertNode(node[0], node[1])
            try:
                del(self.pingcache[old.id])
            except KeyError:
                pass
        try:
            df = old.ping(self.node.id)
        except krpc.KRPCSelfNodeError:
            pass
        # BUG: if ping raised KRPCSelfNodeError above, `df` is unbound here
        # and this line raises NameError; the addCallbacks belongs in an else:
        df.addCallbacks(_notStaleNodeHandler, _staleNodeHandler)

    def sendPing(self, node, callback=None):
        """
        ping a node
        """
        try:
            df = node.ping(self.node.id)
        except krpc.KRPCSelfNodeError:
            pass
        else:
            ## these are the callbacks we use when we issue a PING
            def _pongHandler(dict, node=node, table=self.table, callback=callback):
                # on pong, record the responder in the routing table
                _krpc_sender = dict['_krpc_sender']
                dict = dict['rsp']
                sender = {'id' : dict['id']}
                sender['host'] = _krpc_sender[0]
                sender['port'] = _krpc_sender[1]
                n = self.Node().initWithDict(sender)
                table.insertNode(n)
                if callback:
                    callback()
            def _defaultPong(err, node=node, table=self.table, callback=callback):
                # ping failed: still notify the caller so waits don't hang
                if callback:
                    callback()

            df.addCallbacks(_pongHandler,_defaultPong)

    def findCloseNodes(self, callback=lambda a: a, auto=False):
        """
        This does a findNode on the ID one away from our own.
        This will allow us to populate our table with nodes on our network closest to our own.
        This is called as soon as we start up with an empty table
        """
        if not self.config['pause']:
            # flip our last ID byte by one (mod 256) to search "next to" us
            id = self.node.id[:-1] + chr((ord(self.node.id[-1]) + 1) % 256)
            self.findNode(id, callback)
        if auto:
            if not self.config['pause']:
                self.refreshTable()
            # reschedule with +/-10% jitter
            self.rawserver.external_add_task(self.findCloseNodes, randrange(int(const.FIND_CLOSE_INTERVAL *0.9),
                                                                            int(const.FIND_CLOSE_INTERVAL *1.1)), (lambda a: True, True))

    def refreshTable(self, force=0):
        """
        force=1 will refresh table regardless of last bucket access time
        """
        def callback(nodes):
            pass

        # refresh buckets that are underfull, contain invalid nodes, or stale
        refresh = [bucket for bucket in self.table.buckets if force or (len(bucket.l) < K) or len(filter(lambda a: a.invalid, bucket.l)) or (time() - bucket.lastAccessed > const.BUCKET_STALENESS)]
        for bucket in refresh:
            id = newIDInRange(bucket.min, bucket.max)
            self.findNode(id, callback)

    def stats(self):
        """
        Returns (num_contacts, num_nodes)
        num_contacts: number contacts in our routing table
        num_nodes: number of nodes estimated in the entire dht
        """
        num_contacts = reduce(lambda a, b: a + len(b.l), self.table.buckets, 0)
        # rough estimate: K nodes per bucket-split level
        num_nodes = const.K * (2**(len(self.table.buckets) - 1))
        return {'num_contacts':num_contacts, 'num_nodes':num_nodes}

    def krpc_ping(self, id, _krpc_sender):
        # remote "ping" handler: learn about the sender, answer with our id
        sender = {'id' : id}
        sender['host'] = _krpc_sender[0]
        sender['port'] = _krpc_sender[1]
        n = self.Node().initWithDict(sender)
        self.insertNode(n, contacted=0)
        return {"id" : self.node.id}

    def krpc_find_node(self, target, id, _krpc_sender):
        # remote "find_node" handler: return our closest valid nodes packed
        # in compact form, and learn about the sender
        nodes = self.table.findNodes(target, invalid=False)
        nodes = map(lambda node: node.senderDict(), nodes)
        sender = {'id' : id}
        sender['host'] = _krpc_sender[0]
        sender['port'] = _krpc_sender[1]
        n = self.Node().initWithDict(sender)
        self.insertNode(n, contacted=0)
        return {"nodes" : packNodes(nodes), "id" : self.node.id}
|
||||
|
||||
|
||||
## This class provides read-only access to the DHT, valueForKey
|
||||
## you probably want to use this mixin and provide your own write methods
|
||||
class KhashmirRead(KhashmirBase):
    """Read-only DHT mixin: adds value retrieval on top of KhashmirBase."""
    _Node = KNodeRead

    def retrieveValues(self, key):
        """Return the locally stored list of values for key ([] if none)."""
        try:
            l = self.store[key]
        except KeyError:
            l = []
        return l

    ## also async
    def valueForKey(self, key, callback, searchlocal = 1):
        """ returns the values found for key in global table
            callback will be called with a list of values for each peer that returns unique values
            final callback will be an empty list - probably should change to 'more coming' arg
        """
        nodes = self.table.findNodes(key)

        # get locals
        if searchlocal:
            l = self.retrieveValues(key)
            if len(l) > 0:
                # deliver local hits asynchronously, the same way remote
                # results arrive
                self.rawserver.external_add_task(callback, 0, (l,))
        else:
            l = []

        # create our search state
        state = GetValue(self, key, callback, self.rawserver.add_task)
        self.rawserver.external_add_task(state.goWithNodes, 0, (nodes, l))

    def krpc_find_value(self, key, id, _krpc_sender):
        """Handle an incoming find_value: answer with stored values if we
        have any, otherwise with the closest valid nodes we know of."""
        sender = {'id' : id}
        sender['host'] = _krpc_sender[0]
        sender['port'] = _krpc_sender[1]
        n = self.Node().initWithDict(sender)
        self.insertNode(n, contacted=0)

        l = self.retrieveValues(key)
        if len(l) > 0:
            return {'values' : l, "id": self.node.id}
        else:
            nodes = self.table.findNodes(key, invalid=False)
            nodes = map(lambda node: node.senderDict(), nodes)
            return {'nodes' : packNodes(nodes), "id": self.node.id}
|
||||
|
||||
### provides a generic write method, you probably don't want to deploy something that allows
|
||||
### arbitrary value storage
|
||||
class KhashmirWrite(KhashmirRead):
    """Adds a generic value-store operation on top of the read-only node.

    NOTE(review): as the module comment warns, deploying this as-is lets
    any peer store arbitrary values on us.
    """
    _Node = KNodeWrite

    ## async, callback indicates nodes we got a response from (but no guarantee they didn't drop it on the floor)
    def storeValueForKey(self, key, value, callback=None):
        """ stores the value for key in the global table, returns immediately, no status
            in this implementation, peers respond but don't indicate status to storing values
            a key can have many values
        """
        def _storeValueForKey(nodes, key=key, value=value, response=callback , table=self.table):
            if not response:
                # default callback: ignore the per-peer acknowledgements
                def _storedValueHandler(sender):
                    pass
                response=_storedValueHandler
            action = StoreValue(self, key, value, response, self.rawserver.add_task)
            self.rawserver.external_add_task(action.goWithNodes, 0, (nodes,))

        # this call is asynch
        self.findNode(key, _storeValueForKey)

    def krpc_store_value(self, key, value, id, _krpc_sender):
        """Handle an incoming store_value: record the value locally and
        insert the sender into the routing table.

        FIX: removed an unused timestamp string (t = "%0.6f" % time())
        that the original computed and never read.
        """
        self.store[key] = value
        sender = {'id' : id}
        sender['host'] = _krpc_sender[0]
        sender['port'] = _krpc_sender[1]
        n = self.Node().initWithDict(sender)
        self.insertNode(n, contacted=0)
        return {"id" : self.node.id}
|
||||
|
||||
# the whole shebang, for testing
|
||||
class Khashmir(KhashmirWrite):
    """The whole shebang: a full read/write DHT node, used for testing."""
    _Node = KNodeWrite
|
76
khashmir/knet.py
Executable file
76
khashmir/knet.py
Executable file
@ -0,0 +1,76 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
#
|
||||
# knet.py
|
||||
# create a network of khashmir nodes
|
||||
# usage: knet.py <num_nodes> <start_port> <ip_address>
|
||||
|
||||
from khashmir import Khashmir
|
||||
from random import randrange
|
||||
import sys, os
|
||||
|
||||
class Network:
    """Spin up a local network of Khashmir nodes for ad-hoc testing.

    NOTE(review): this module uses a global `reactor` (presumably
    twisted.internet.reactor) that is never imported here -- confirm
    before running.
    """
    def __init__(self, size=0, startport=5555, localip='127.0.0.1'):
        self.num = size              # number of nodes to start
        self.startport = startport   # node i listens on startport + i
        self.localip = localip

    def _done(self, val):
        # completion flag flipped by findCloseNodes' callback
        self.done = 1

    def setUp(self):
        """Create the nodes, wire three random contacts into each, then
        bootstrap every routing table (twice) via findCloseNodes."""
        self.kfiles()
        self.l = []
        for i in range(self.num):
            self.l.append(Khashmir('', self.startport + i, '/tmp/kh%s.db' % (self.startport + i)))
        reactor.iterate()
        reactor.iterate()

        for i in self.l:
            # seed each node with three random peers (may include itself)
            i.addContact(self.localip, self.l[randrange(0,self.num)].port)
            i.addContact(self.localip, self.l[randrange(0,self.num)].port)
            i.addContact(self.localip, self.l[randrange(0,self.num)].port)
        reactor.iterate()
        reactor.iterate()
        reactor.iterate()

        for i in self.l:
            self.done = 0
            i.findCloseNodes(self._done)
            while not self.done:
                reactor.iterate()
        for i in self.l:
            self.done = 0
            i.findCloseNodes(self._done)
            while not self.done:
                reactor.iterate()

    def tearDown(self):
        # stop listening on every port, then remove the on-disk databases
        for i in self.l:
            i.listenport.stopListening()
        self.kfiles()

    def kfiles(self):
        """Best-effort removal of leftover /tmp/kh<port>.db files."""
        for i in range(self.startport, self.startport+self.num):
            try:
                os.unlink('/tmp/kh%s.db' % i)
            except:
                # file may simply not exist
                pass

        reactor.iterate()
|
||||
|
||||
if __name__ == "__main__":
    # usage: knet.py <num_nodes> <start_port> <ip_address>
    n = Network(int(sys.argv[1]), int(sys.argv[2]), sys.argv[3])
    n.setUp()
    try:
        # NOTE(review): `reactor` is never imported in this module
        reactor.run()
    finally:
        n.tearDown()
|
82
khashmir/knode.py
Executable file
82
khashmir/knode.py
Executable file
@ -0,0 +1,82 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
from node import Node
|
||||
from BitTorrent.defer import Deferred
|
||||
from const import NULL_ID
|
||||
from krpc import KRPCProtocolError
|
||||
|
||||
class IDChecker:
    """Minimal holder that remembers a node id for later comparison."""
    def __init__(self, id):
        # nothing to validate here; just keep the id around
        self.id = id
|
||||
|
||||
class KNodeBase(Node):
    """Node with an attached connection factory; adds ping/find_node RPCs."""
    def __init__(self, cfa):
        Node.__init__(self)
        # cfa: connection factory mapping (host, port) -> KRPC connection
        self.cfa = cfa

    def conn(self):
        # may raise KRPCSelfNodeError when (host, port) is our own address
        return self.cfa((self.host, self.port))

    def checkSender(self, dict):
        """Validate the id in an RPC response: invalidate this node on an
        id mismatch, otherwise mark it freshly contacted in the table."""
        try:
            senderid = dict['rsp']['id']
        except KeyError:
            raise KRPCProtocolError, "No peer id in response."
        else:
            if self.id != NULL_ID and senderid != self.id:
                # peer reported a different id than the one we recorded
                self.table.table.invalidateNode(self)
            else:
                if self.id == NULL_ID:
                    # first contact: adopt the id the peer reported
                    self.id = senderid
                self.table.insertNode(self, contacted=1)
        return dict

    def errBack(self, err):
        # any RPC failure counts against this node's reliability
        self.table.table.nodeFailed(self)
        return err

    def ping(self, id):
        """Send a ping; tracks an in-flight flag on the connection.

        NOTE(review): each conn() call goes back through the factory;
        this assumes the factory returns the same cached connection
        object every time -- confirm against the hostbroker cache.
        """
        df = self.conn().sendRequest('ping', {"id":id})
        self.conn().pinging = True
        def endping(x):
            # clear the in-flight flag on success or failure
            self.conn().pinging = False
            return x
        df.addCallbacks(endping, endping)
        df.addCallbacks(self.checkSender, self.errBack)
        return df

    def findNode(self, target, id):
        """Ask this peer for nodes close to target; returns a deferred."""
        df = self.conn().sendRequest('find_node', {"target" : target, "id": id})
        df.addErrback(self.errBack)
        df.addCallback(self.checkSender)
        return df

    def inPing(self):
        # True while an issued ping has not yet completed
        return self.conn().pinging
|
||||
|
||||
class KNodeRead(KNodeBase):
    """Read-capable node: adds the find_value RPC."""
    def findValue(self, key, id):
        """Ask this peer for values stored under key; returns a deferred."""
        payload = {"key" : key, "id" : id}
        deferred = self.conn().sendRequest('find_value', payload)
        deferred.addErrback(self.errBack)
        deferred.addCallback(self.checkSender)
        return deferred
|
||||
|
||||
class KNodeWrite(KNodeRead):
    """Write-capable node: adds the store_value / store_values RPCs."""
    def storeValue(self, key, value, id):
        """Ask this peer to store one value under key; returns a deferred."""
        payload = {"key" : key, "value" : value, "id": id}
        deferred = self.conn().sendRequest('store_value', payload)
        deferred.addErrback(self.errBack)
        deferred.addCallback(self.checkSender)
        return deferred

    def storeValues(self, key, value, id):
        """Ask this peer to store several values under key; returns a deferred."""
        payload = {"key" : key, "values" : value, "id": id}
        deferred = self.conn().sendRequest('store_values', payload)
        deferred.addErrback(self.errBack)
        deferred.addCallback(self.checkSender)
        return deferred
|
243
khashmir/krpc.py
Executable file
243
khashmir/krpc.py
Executable file
@ -0,0 +1,243 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
from BitTorrent.defer import Deferred
|
||||
from BitTorrent.bencode import bencode, bdecode
|
||||
import socket
|
||||
from BitTorrent.RawServer_magic import Handler
|
||||
from BitTorrent.platform import bttime
|
||||
import time
|
||||
from math import log10
|
||||
|
||||
import sys
|
||||
from traceback import print_exc
|
||||
|
||||
from khash import distance
|
||||
from cache import Cache
|
||||
from KRateLimiter import KRateLimiter
|
||||
from hammerlock import Hammerlock
|
||||
|
||||
from const import *
|
||||
|
||||
# commands
|
||||
# single-character keys used in the bencoded wire messages
TID = 't'   # transaction id
REQ = 'q'   # request: method name
RSP = 'r'   # response payload
TYP = 'y'   # message type: one of REQ / RSP / ERR
ARG = 'a'   # request arguments
ERR = 'e'   # error payload
|
||||
|
||||
class KRPCFailSilently(Exception):
    """Raised by a request handler that wants no response sent at all."""
    pass

class KRPCProtocolError(Exception):
    """Malformed / protocol-violating message; reported to the peer."""
    pass

class KRPCServerError(Exception):
    """Handler-side failure; reported to the peer as a server error."""
    pass

class KRPCSelfNodeError(Exception):
    """Raised when trying to open a connection to our own address."""
    pass
|
||||
|
||||
class hostbroker(Handler):
    """Demultiplexes incoming UDP datagrams to per-address KRPC connections."""
    def __init__(self, server, addr, transport, call_later, max_ul_rate, config, rlcount):
        self.server = server
        self.addr = addr
        self.transport = transport
        # replies go through a rate limiter
        self.rltransport = KRateLimiter(transport, max_ul_rate, call_later, rlcount, config['max_rate_period'])
        self.call_later = call_later
        # per-address connection cache, refreshed on access
        self.connections = Cache(touch_on_access=True)
        # basic per-source flood protection
        self.hammerlock = Hammerlock(100, call_later)
        self.expire_connections(loop=True)
        self.config = config
        if not self.config.has_key('pause'):
            self.config['pause'] = False

    def expire_connections(self, loop=False):
        # drop connections idle longer than the cache time; with
        # loop=True, reschedules itself forever
        self.connections.expire(bttime() - KRPC_CONNECTION_CACHE_TIME)
        if loop:
            self.call_later(self.expire_connections, KRPC_CONNECTION_CACHE_TIME, (True,))

    def data_came_in(self, addr, datagram):
        #if addr != self.addr:
        # ignore traffic while paused or when the source is hammering us
        if not self.config['pause'] and self.hammerlock.check(addr):
            c = self.connectionForAddr(addr)
            c.datagramReceived(datagram, addr)

    def connection_lost(self, socket):
        ## this is like, bad
        print ">>> connection lost!", socket

    def connectionForAddr(self, addr):
        """Return (creating and caching if needed) the KRPC connection
        for addr; raises KRPCSelfNodeError for our own address."""
        if addr == self.addr:
            raise KRPCSelfNodeError()
        if not self.connections.has_key(addr):
            conn = KRPC(addr, self.server, self.transport, self.rltransport, self.call_later)
            self.connections[addr] = conn
        else:
            conn = self.connections[addr]
        return conn
|
||||
|
||||
|
||||
## connection
|
||||
class KRPC:
    """One KRPC conversation with a single remote address.

    Owns the transaction-id -> Deferred map for outstanding requests and
    dispatches incoming datagrams to the factory's krpc_* handlers.
    """
    noisy = 0
    def __init__(self, addr, server, transport, rltransport, call_later):
        self.call_later = call_later
        self.transport = transport      # unthrottled transport for requests
        self.rltransport = rltransport  # rate-limited transport for replies
        self.factory = server           # object providing krpc_* handlers
        self.addr = addr
        self.tids = {}                  # transaction id -> pending Deferred
        self.mtid = 0                   # next transaction id (mod 256)
        self.pinging = False

    def sendErr(self, addr, tid, msg):
        ## send error
        out = bencode({TID:tid, TYP:ERR, ERR :msg})
        olen = len(out)
        self.rltransport.sendto(out, 0, addr)
        return olen

    def datagramReceived(self, str, addr):
        """Decode one datagram and dispatch by message type (REQ/RSP/ERR)."""
        # bdecode
        try:
            msg = bdecode(str)
        except Exception, e:
            if self.noisy:
                print "response decode error: " + `e`, `str`
        else:
            #if self.noisy:
            #    print msg
            # look at msg type
            if msg[TYP] == REQ:
                ilen = len(str)
                # if request: tell factory to handle it
                f = getattr(self.factory ,"krpc_" + msg[REQ], None)
                msg[ARG]['_krpc_sender'] = self.addr
                if f and callable(f):
                    try:
                        ret = apply(f, (), msg[ARG])
                    except KRPCFailSilently:
                        # handler asked us not to respond at all
                        pass
                    except KRPCServerError, e:
                        olen = self.sendErr(addr, msg[TID], "Server Error: %s" % e.args[0])
                    except KRPCProtocolError, e:
                        olen = self.sendErr(addr, msg[TID], "Protocol Error: %s" % e.args[0])
                    except Exception, e:
                        print_exc(20)
                        olen = self.sendErr(addr, msg[TID], "Server Error")
                    else:
                        if ret:
                            # make response
                            out = bencode({TID : msg[TID], TYP : RSP, RSP : ret})
                        else:
                            # handler returned nothing: send empty response
                            out = bencode({TID : msg[TID], TYP : RSP, RSP : {}})
                        # send response
                        olen = len(out)
                        self.rltransport.sendto(out, 0, addr)

                else:
                    if self.noisy:
                        #print "don't know about method %s" % msg[REQ]
                        pass
                    # unknown method
                    out = bencode({TID:msg[TID], TYP:ERR, ERR : KRPC_ERROR_METHOD_UNKNOWN})
                    olen = len(out)
                    self.rltransport.sendto(out, 0, addr)
                if self.noisy:
                    # debug logging: XOR-distance of requester / target from us
                    try:
                        ndist = 10 * log10(2**160 * 1.0 / distance(self.factory.node.id, msg[ARG]['id']))
                        ndist = int(ndist)
                    except OverflowError:
                        ndist = 999

                    h = None
                    if msg[ARG].has_key('target'):
                        h = msg[ARG]['target']
                    elif msg[ARG].has_key('info_hash'):
                        h = msg[ARG]['info_hash']
                    else:
                        tdist = '-'

                    if h != None:
                        try:
                            tdist = 10 * log10(2**160 * 1.0 / distance(self.factory.node.id, h))
                            tdist = int(tdist)
                        except OverflowError:
                            tdist = 999

                    t = time.localtime()
                    t = "%2d-%2d-%2d %2d:%2d:%2d" % (t[0], t[1], t[2], t[3], t[4], t[5])
                    print "%s %s %s >>> %s - %s %s %s - %s %s" % (t,
                                                                  msg[ARG]['id'].encode('base64')[:4],
                                                                  addr,
                                                                  self.factory.node.port,
                                                                  ilen,
                                                                  msg[REQ],
                                                                  olen,
                                                                  ndist,
                                                                  tdist)
            elif msg[TYP] == RSP:
                # if response: look up the transaction id
                if self.tids.has_key(msg[TID]):
                    df = self.tids[msg[TID]]
                    # remove before callback so a re-entrant reply can't fire twice
                    del(self.tids[msg[TID]])
                    df.callback({'rsp' : msg[RSP], '_krpc_sender': addr})
                else:
                    # no tid, this transaction timed out already...
                    pass

            elif msg[TYP] == ERR:
                # if error: look up the transaction id
                if self.tids.has_key(msg[TID]):
                    df = self.tids[msg[TID]]
                    df.errback(msg[ERR])
                    del(self.tids[msg[TID]])
                else:
                    # day late and dollar short
                    pass
            else:
                print "unknown message type " + `msg`
                # unknown message type
                # NOTE(review): this raises KeyError when the tid is not
                # pending (unlike the guarded branches above) -- confirm
                df = self.tids[msg[TID]]
                df.errback(KRPC_ERROR_RECEIVED_UNKNOWN)
                del(self.tids[msg[TID]])

    def sendRequest(self, method, args):
        """Bencode and queue a request; returns a Deferred for the reply.

        The send itself and the timeout are both scheduled via call_later.
        """
        msg = {TID : chr(self.mtid), TYP : REQ, REQ : method, ARG : args}
        self.mtid = (self.mtid + 1) % 256
        s = bencode(msg)
        d = Deferred()
        self.tids[msg[TID]] = d
        self.call_later(self.timeOut, KRPC_TIMEOUT, (msg[TID],))
        self.call_later(self._send, 0, (s, d))
        return d

    def timeOut(self, id):
        """Errback the pending request `id` if it is still outstanding."""
        if self.tids.has_key(id):
            df = self.tids[id]
            del(self.tids[id])
            df.errback(KRPC_ERROR_TIMEOUT)

    def _send(self, s, d):
        # actual (unthrottled) transmission of an outgoing request
        try:
            self.transport.sendto(s, 0, self.addr)
        except socket.error:
            d.errback(KRPC_SOCKET_ERROR)
|
||||
|
119
khashmir/kstore.py
Executable file
119
khashmir/kstore.py
Executable file
@ -0,0 +1,119 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
try:
    from random import sample
except ImportError:
    # very old Pythons lack random.sample; emulate it
    from random import choice
    def sample(l, n):
        """Return n randomly chosen elements of l (all of l if len(l) <= n).

        FIX: selection is done by *index* rather than by value.  The
        original keyed a dict on chosen values, so a list longer than n
        but containing fewer than n distinct values made the loop spin
        forever.
        """
        if len(l) <= n:
            return l
        picked = {}
        indices = range(len(l))
        while len(picked) < n:
            picked[choice(indices)] = 1
        return [l[i] for i in picked.keys()]
|
||||
|
||||
from BitTorrent.platform import bttime as time
|
||||
|
||||
class KItem:
    """One stored (key, value) pair stamped with its insertion time."""
    def __init__(self, key, value):
        self.t = time()   # insertion time, used for expiry ordering
        self.k = key
        self.v = value
    def __cmp__(self, a):
        # same value = same item, only used to keep dupes out of db
        if self.v == a.v:
            return 0

        # compare by time
        if self.t < a.t:
            return -1
        elif self.t > a.t:
            return 1
        else:
            return 0

    def __hash__(self):
        # hash by value, consistent with value-equality in __cmp__
        return self.v.__hash__()

    def __repr__(self):
        return `(self.k, self.v, time() - self.t)`
|
||||
|
||||
## in memory data store for distributed tracker
|
||||
## keeps a list of values per key in dictionary
|
||||
## keeps expiration for each key in a queue
|
||||
## can efficiently expire all values older than a given time
|
||||
## can insert one val at a time, or a list: ks['key'] = 'value' or ks['key'] = ['v1', 'v2', 'v3']
|
||||
class KStore:
    """In-memory key -> [values] store with time-based expiry (see the
    module comments above the class)."""
    def __init__(self):
        self.d = {}   # key -> list of KItem, most recently inserted first
        self.q = []   # every KItem in insertion order, drives expiry

    def __getitem__(self, key):
        # raises KeyError if key is absent, like a plain dict
        return [x.v for x in self.d[key]]

    def __setitem__(self, key, value):
        # a list value means "insert each element under key"
        if type(value) == type([]):
            [self.__setitem__(key, v) for v in value]
            return
        x = KItem(key, value)
        try:
            l = self.d[key]
        except KeyError:
            self.d[key] = [x]
        else:
            # this is slow
            # re-inserting an existing value refreshes its position
            try:
                i = l.index(x)
                del(l[i])
            except ValueError:
                pass
            l.insert(0, x)
        self.q.append(x)

    def __delitem__(self, key):
        del(self.d[key])

    def __len__(self):
        return len(self.d)

    def keys(self):
        return self.d.keys()

    def values(self):
        return [self[key] for key in self.keys()]

    def items(self):
        return [(key, self[key]) for key in self.keys()]

    def expire(self, t):
        #.expire values inserted prior to t
        # pops expired items off the front of the queue; for each one,
        # trims the tail of its key's (newest-first) value list and drops
        # the key entirely once the list is exhausted
        try:
            while self.q[0].t <= t:
                x = self.q.pop(0)
                try:
                    l = self.d[x.k]
                    try:
                        while l[-1].t <= t:
                            l.pop()
                    except IndexError:
                        del(self.d[x.k])
                except KeyError:
                    pass
        except IndexError:
            # queue exhausted
            pass

    def sample(self, key, n):
        # returns n random values of key, or all values if less than n
        try:
            l = [x.v for x in sample(self.d[key], n)]
        except ValueError:
            # random.sample raises ValueError when n > population
            l = [x.v for x in self.d[key]]
        return l
|
340
khashmir/ktable.py
Executable file
340
khashmir/ktable.py
Executable file
@ -0,0 +1,340 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
from BitTorrent.platform import bttime as time
|
||||
from bisect import *
|
||||
from types import *
|
||||
|
||||
import khash as hash
|
||||
import const
|
||||
from const import K, HASH_LENGTH, NULL_ID, MAX_FAILURES, MIN_PING_INTERVAL
|
||||
from node import Node
|
||||
|
||||
class KTable:
    """local routing table for a kademlia like distributed hash table"""
    def __init__(self, node):
        # this is the root node, a.k.a. US!
        self.node = node
        # start with one bucket covering the whole id space
        self.buckets = [KBucket([], 0L, 2L**HASH_LENGTH)]
        self.insertNode(node)

    def _bucketIndexForInt(self, num):
        """the index of the bucket that should hold int"""
        # relies on KBucket's rich comparisons against plain integers
        return bisect_left(self.buckets, num)

    def bucketForInt(self, num):
        return self.buckets[self._bucketIndexForInt(num)]

    def findNodes(self, id, invalid=True):
        """
        return K nodes in our own local table closest to the ID.
        id may be a 20-byte string, a Node, or an int/long;
        invalid=False filters out nodes marked invalid.
        """

        if isinstance(id, str):
            num = hash.intify(id)
        elif isinstance(id, Node):
            num = id.num
        elif isinstance(id, int) or isinstance(id, long):
            num = id
        else:
            raise TypeError, "findNodes requires an int, string, or Node"

        nodes = []
        i = self._bucketIndexForInt(num)

        # if this node is already in our table then return it
        try:
            node = self.buckets[i].getNodeWithInt(num)
        except ValueError:
            pass
        else:
            return [node]

        # don't have the node, get the K closest nodes
        nodes = nodes + self.buckets[i].l
        if not invalid:
            nodes = [a for a in nodes if not a.invalid]
        if len(nodes) < K:
            # need more nodes: widen outward one bucket at a time
            min = i - 1
            max = i + 1
            while len(nodes) < K and (min >= 0 or max < len(self.buckets)):
                #ASw: note that this requires K be even
                if min >= 0:
                    nodes = nodes + self.buckets[min].l
                if max < len(self.buckets):
                    nodes = nodes + self.buckets[max].l
                min = min - 1
                max = max + 1
            if not invalid:
                nodes = [a for a in nodes if not a.invalid]

        # sort by XOR distance to the target
        nodes.sort(lambda a, b, num=num: cmp(num ^ a.num, num ^ b.num))
        return nodes[:K]

    def _splitBucket(self, a):
        # halve a's range; the new bucket b takes the upper half
        diff = (a.max - a.min) / 2
        b = KBucket([], a.max - diff, a.max)
        # KBucket.__eq__ matches an int inside a bucket's range, so
        # index(a.min) locates bucket a itself
        self.buckets.insert(self.buckets.index(a.min) + 1, b)
        a.max = a.max - diff
        # transfer nodes to new bucket
        for anode in a.l[:]:
            if anode.num >= a.max:
                a.removeNode(anode)
                b.addNode(anode)

    def replaceStaleNode(self, stale, new):
        """this is used by clients to replace a node returned by insertNode after
        it fails to respond to a Pong message"""
        i = self._bucketIndexForInt(stale.num)

        if self.buckets[i].hasNode(stale):
            self.buckets[i].removeNode(stale)
        if new and self.buckets[i].hasNode(new):
            self.buckets[i].seenNode(new)
        elif new:
            self.buckets[i].addNode(new)

        return

    def insertNode(self, node, contacted=1, nocheck=False):
        """
        this insert the node, returning None if successful, returns the oldest node in the bucket if it's full
        the caller responsible for pinging the returned node and calling replaceStaleNode if it is found to be stale!!
        contacted means that yes, we contacted THEM and we know the node is reachable
        """
        # never insert ourselves or a node with a null id
        if node.id == NULL_ID or node.id == self.node.id:
            return

        if contacted:
            node.updateLastSeen()

        # get the bucket for this node
        i = self._bucketIndexForInt(node.num)
        # check to see if node is in the bucket already
        if self.buckets[i].hasNode(node):
            it = self.buckets[i].l.index(node.num)
            xnode = self.buckets[i].l[it]
            if contacted:
                # preserve the original first-seen age across the update
                node.age = xnode.age
                self.buckets[i].seenNode(node)
            elif xnode.lastSeen != 0 and xnode.port == node.port and xnode.host == node.host:
                xnode.updateLastSeen()
            return

        # we don't have this node, check to see if the bucket is full
        if not self.buckets[i].bucketFull():
            # no, append this node and return
            self.buckets[i].addNode(node)
            return

        # full bucket, check to see if any nodes are invalid
        t = time()
        def ls(a, b):
            # order nodes by lastSeen, oldest first
            if a.lastSeen > b.lastSeen:
                return 1
            elif b.lastSeen > a.lastSeen:
                return -1
            return 0

        invalid = [x for x in self.buckets[i].invalid.values() if x.invalid]
        if len(invalid) and not nocheck:
            invalid.sort(ls)
            # discard invalid entries that already left the bucket
            while invalid and not self.buckets[i].hasNode(invalid[0]):
                del(self.buckets[i].invalid[invalid[0].num])
                invalid = invalid[1:]
            if invalid and (invalid[0].lastSeen == 0 and invalid[0].fails < MAX_FAILURES):
                # never-seen node still below the failure limit: let the
                # caller ping it
                return invalid[0]
            elif invalid:
                self.replaceStaleNode(invalid[0], node)
                return

        stale = [n for n in self.buckets[i].l if (t - n.lastSeen) > MIN_PING_INTERVAL]
        if len(stale) and not nocheck:
            stale.sort(ls)
            # hand the oldest candidate back to the caller for a ping
            return stale[0]

        # bucket is full and all nodes are valid, check to see if self.node is in the bucket
        if not (self.buckets[i].min <= self.node < self.buckets[i].max):
            return

        # this bucket is full and contains our node, split the bucket
        if len(self.buckets) >= HASH_LENGTH:
            # our table is FULL, this is really unlikely
            print "Hash Table is FULL!  Increase K!"
            return

        self._splitBucket(self.buckets[i])

        # now that the bucket is split and balanced, try to insert the node again
        return self.insertNode(node, contacted)

    def justSeenNode(self, id):
        """call this any time you get a message from a node
        it will update it in the table if it's there """
        try:
            n = self.findNodes(id)[0]
        except IndexError:
            return None
        else:
            tstamp = n.lastSeen
            n.updateLastSeen()
            bucket = self.bucketForInt(n.num)
            bucket.seenNode(n)
            return tstamp

    def invalidateNode(self, n):
        """
        forget about node n - use when you know that node is invalid
        """
        n.invalid = True
        # NOTE(review): storing the bucket on self looks unintentional; a
        # local variable would do -- confirm nothing reads self.bucket
        self.bucket = self.bucketForInt(n.num)
        self.bucket.invalidateNode(n)

    def nodeFailed(self, node):
        """ call this when a node fails to respond to a message, to invalidate that node """
        try:
            n = self.findNodes(node.num)[0]
        except IndexError:
            return None
        else:
            if n.msgFailed() >= const.MAX_FAILURES:
                self.invalidateNode(n)

    def numPeers(self):
        """ estimated number of connectable nodes in global table """
        return 8 * (2 ** (len(self.buckets) - 1))
|
||||
|
||||
class KBucket:
    """A contiguous range [min, max) of the id space holding up to K nodes."""
    # NOTE(review): __slots__ has no effect on this old-style class, and it
    # omits 'l', 'index' and 'invalid' anyway -- confirm before porting to
    # a new-style class
    __slots__ = ('min', 'max', 'lastAccessed')
    def __init__(self, contents, min, max):
        self.l = contents        # nodes, least-recently-seen first
        self.index = {}          # node.num -> node
        self.invalid = {}        # node.num -> node, for invalidated nodes
        self.min = min
        self.max = max
        self.lastAccessed = time()

    def touch(self):
        self.lastAccessed = time()

    def lacmp(self, a, b):
        # compare nodes by lastSeen, ascending
        if a.lastSeen > b.lastSeen:
            return 1
        elif b.lastSeen > a.lastSeen:
            return -1
        return 0

    def sort(self):
        self.l.sort(self.lacmp)

    def getNodeWithInt(self, num):
        """Return the node whose id-int is num; raises ValueError if absent."""
        try:
            node = self.index[num]
        except KeyError:
            raise ValueError
        return node

    def addNode(self, node):
        # silently refuses when the bucket is full or the node is present
        if len(self.l) >= K:
            return
        if self.index.has_key(node.num):
            return
        self.l.append(node)
        self.index[node.num] = node
        self.touch()

    def removeNode(self, node):
        assert self.index.has_key(node.num)
        # NOTE(review): self.l.index(node.num) relies on Node instances
        # comparing equal to their own num -- confirm in node.py
        del(self.l[self.l.index(node.num)])
        del(self.index[node.num])
        try:
            del(self.invalid[node.num])
        except KeyError:
            pass
        self.touch()

    def invalidateNode(self, node):
        self.invalid[node.num] = node

    def seenNode(self, node):
        """Move node to the most-recently-seen end and clear invalid state."""
        try:
            del(self.invalid[node.num])
        except KeyError:
            pass
        it = self.l.index(node.num)
        del(self.l[it])
        self.l.append(node)
        self.index[node.num] = node

    def hasNode(self, node):
        return self.index.has_key(node.num)

    def bucketFull(self):
        return len(self.l) >= K

    def __repr__(self):
        return "<KBucket %d items (%d to %d)>" % (len(self.l), self.min, self.max)

    ## Comparators
    # necessary for bisecting list of buckets with a hash expressed as an integer or a distance
    # compares integer or node object with the bucket's range
    def __lt__(self, a):
        if isinstance(a, Node): a = a.num
        return self.max <= a
    def __le__(self, a):
        if isinstance(a, Node): a = a.num
        return self.min < a
    def __gt__(self, a):
        if isinstance(a, Node): a = a.num
        return self.min > a
    def __ge__(self, a):
        if isinstance(a, Node): a = a.num
        return self.max >= a
    def __eq__(self, a):
        # an int/node "equals" the bucket whose range contains it
        if isinstance(a, Node): a = a.num
        return self.min <= a and self.max > a
    def __ne__(self, a):
        if isinstance(a, Node): a = a.num
        return self.min >= a or self.max < a
|
||||
|
||||
|
||||
### UNIT TESTS ###
|
||||
import unittest
|
||||
|
||||
class TestKTable(unittest.TestCase):
    """Basic add / invalidate / failure-count behaviour of KTable."""
    def setUp(self):
        # a table rooted at a fresh random-id node
        self.a = Node().init(hash.newID(), 'localhost', 2002)
        self.t = KTable(self.a)

    def testAddNode(self):
        self.b = Node().init(hash.newID(), 'localhost', 2003)
        self.t.insertNode(self.b)
        self.assertEqual(len(self.t.buckets[0].l), 1)
        self.assertEqual(self.t.buckets[0].l[0], self.b)

    def testRemove(self):
        self.testAddNode()
        self.t.invalidateNode(self.b)
        # invalidation removes the node from the bucket list
        self.assertEqual(len(self.t.buckets[0].l), 0)

    def testFail(self):
        self.testAddNode()
        # one failure short of the limit keeps the node in the table
        for i in range(const.MAX_FAILURES - 1):
            self.t.nodeFailed(self.b)
            self.assertEqual(len(self.t.buckets[0].l), 1)
            self.assertEqual(self.t.buckets[0].l[0], self.b)

        # the final failure evicts it
        self.t.nodeFailed(self.b)
        self.assertEqual(len(self.t.buckets[0].l), 0)
|
||||
|
||||
|
||||
# allow running this module's unit tests directly
if __name__ == "__main__":
    unittest.main()
|
95
khashmir/node.py
Executable file
95
khashmir/node.py
Executable file
@ -0,0 +1,95 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
import khash
|
||||
from BitTorrent.platform import bttime as time
|
||||
from types import *
|
||||
|
||||
class Node:
    """Encapsulate contact info for one DHT peer.

    Holds the 20-byte id, its integer form (num), host/port, failure count,
    and liveness bookkeeping.  Old-style (classic) class: the comparators
    below rely on `type(a) == InstanceType`, which only matches old-style
    instances — do not convert to a new-style class without revisiting them.
    """
    def __init__(self):
        # fails: consecutive failed-message count; reset on contact
        self.fails = 0
        # lastSeen: timestamp of last successful contact (0 = never)
        self.lastSeen = 0
        # invalid: True until the node has been seen at least once
        self.invalid = True
        self.id = self.host = self.port = ''
        # age: creation time of this contact record
        self.age = time()

    def init(self, id, host, port):
        """Populate from explicit fields; returns self for chaining."""
        self.id = id
        # num is the integer form of the id, used by the range comparators
        self.num = khash.intify(id)
        self.host = host
        self.port = port
        self._senderDict = {'id': self.id, 'port' : self.port, 'host' : self.host}
        return self

    def initWithDict(self, dict):
        """Populate from a wire-format sender dict (keys: id/port/host and
        optionally age); returns self for chaining.  NOTE: the parameter
        shadows the builtin `dict`."""
        self._senderDict = dict
        self.id = dict['id']
        self.num = khash.intify(self.id)
        self.port = dict['port']
        self.host = dict['host']
        self.age = dict.get('age', self.age)
        return self

    def updateLastSeen(self):
        """Record a successful contact: refresh timestamp, clear failures."""
        self.lastSeen = time()
        self.fails = 0
        self.invalid = False

    def msgFailed(self):
        """Count one failed message; returns the new failure total."""
        self.fails = self.fails + 1
        return self.fails

    def senderDict(self):
        """Return the wire-format dict describing this node."""
        return self._senderDict

    def __hash__(self):
        # hash on the (string) id so equal-id nodes collide in sets/dicts
        return self.id.__hash__()

    def __repr__(self):
        return ">node <%s> %s<" % (self.id.encode('base64')[:4], (self.host, self.port))

    ## these comparators let us bisect/index a list full of nodes with
    ## either a node or an int/long (compares on the numeric id)
    def __lt__(self, a):
        if type(a) == InstanceType:
            a = a.num
        return self.num < a
    def __le__(self, a):
        if type(a) == InstanceType:
            a = a.num
        return self.num <= a
    def __gt__(self, a):
        if type(a) == InstanceType:
            a = a.num
        return self.num > a
    def __ge__(self, a):
        if type(a) == InstanceType:
            a = a.num
        return self.num >= a
    def __eq__(self, a):
        if type(a) == InstanceType:
            a = a.num
        return self.num == a
    def __ne__(self, a):
        if type(a) == InstanceType:
            a = a.num
        return self.num != a
|
||||
|
||||
|
||||
import unittest
|
||||
|
||||
class TestNode(unittest.TestCase):
    """Smoke tests for the Node contact record."""
    def setUp(self):
        self.node = Node().init(khash.newID(), 'localhost', 2002)
    def testUpdateLastSeen(self):
        # updateLastSeen must advance the lastSeen timestamp (starts at 0)
        t = self.node.lastSeen
        self.node.updateLastSeen()
        assert t < self.node.lastSeen
|
||||
|
70
khashmir/setup.py
Executable file
70
khashmir/setup.py
Executable file
@ -0,0 +1,70 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
# Dependency checks: fail early with a human-readable message if the
# build-time requirements are missing.  Python 2-only syntax throughout
# (`raise SystemExit, msg`, `apply`) — this script predates Python 3.
try:
    import distutils.core
    import distutils.command.build_ext
except ImportError:
    raise SystemExit, """\
You don't have the python development modules installed.

If you have Debian you can install it by running
apt-get install python-dev

If you have RedHat and know how to install this from an RPM please
email us so we can put instructions here.
"""

try:
    import twisted
except ImportError:
    raise SystemExit, """\
You don't have Twisted installed.

Twisted can be downloaded from
http://twistedmatrix.com/products/download

Anything later that version 1.0.3 should work
"""

try:
    import sqlite
except ImportError:
    raise SystemExit, """\
You don't have PySQLite installed.

PySQLite can be downloaded from
http://sourceforge.net/project/showfiles.php?group_id=54058&release_id=139482
"""

# distutils metadata; NOTE(review): 'licence' is the British-spelling alias —
# confirm the target distutils version accepts it (modern tools expect
# 'license').
setup_args = {
    'name': 'khashmir',
    'author': 'Andrew Loewenstern',
    'author_email': 'burris@users.sourceforge.net',
    'licence': 'MIT',
    'package_dir': {'khashmir': '.'},
    'packages': [
        'khashmir',
        ],
    }

# only pass newer metadata fields when this distutils supports them
if hasattr(distutils.dist.DistributionMetadata, 'get_keywords'):
    setup_args['keywords'] = "internet tcp p2p"

if hasattr(distutils.dist.DistributionMetadata, 'get_platforms'):
    setup_args['platforms'] = "win32 posix"

if __name__ == '__main__':
    # apply() is the py2 spelling of distutils.core.setup(**setup_args)
    apply(distutils.core.setup, (), setup_args)
|
21
khashmir/test.py
Executable file
21
khashmir/test.py
Executable file
@ -0,0 +1,21 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
import unittest
|
||||
|
||||
import ktable, khashmir
|
||||
import khash, node, knode
|
||||
import actions
|
||||
import test_krpc
|
||||
import test_khashmir
|
||||
import kstore
|
||||
|
||||
# aggregate test driver: load and run every khashmir test module by name
tests = unittest.defaultTestLoader.loadTestsFromNames(['kstore', 'khash', 'node', 'knode', 'actions', 'ktable', 'test_krpc', 'test_khashmir'])
result = unittest.TextTestRunner().run(tests)
|
166
khashmir/test_khashmir.py
Executable file
166
khashmir/test_khashmir.py
Executable file
@ -0,0 +1,166 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
from unittest import *
|
||||
|
||||
from BitTorrent import RawServer_magic
|
||||
|
||||
from khashmir import *
|
||||
import khash
|
||||
from copy import copy
|
||||
|
||||
from random import randrange
|
||||
from krpc import KRPC
|
||||
|
||||
KRPC.noisy=0
|
||||
import os
|
||||
|
||||
# self-test entry point; loads this module by its own filename (argv[0]
# minus '.py') so the classes below are picked up on re-import.
# NOTE(review): `sys` is presumably provided by a wildcard import above —
# confirm.
if __name__ =="__main__":
    tests = defaultTestLoader.loadTestsFromNames([sys.argv[0][:-3]])
    result = TextTestRunner().run(tests)
|
||||
|
||||
class MultiTest(TestCase):
    """Integration test: spin up `num` Khashmir nodes on localhost, bootstrap
    them against each other, then store/retrieve values across the swarm.

    The event loop is pumped manually with listen_once(); self.done is a
    completion flag flipped by callbacks.
    """
    num = 25
    def _done(self, val):
        # generic completion callback for findCloseNodes
        self.done = 1

    def setUp(self):
        self.l = []
        self.startport = 10088
        d = dict([(x[0],x[1]) for x in common_options + rare_options])
        self.r = RawServer(Event(), d)
        # one node per port, each with its own db file under /tmp
        for i in range(self.num):
            self.l.append(Khashmir('127.0.0.1', self.startport + i, '/tmp/%s.test' % (self.startport + i), self.r))
        self.r.listen_once(1)
        self.r.listen_once(1)

        # seed each node with up to three random contacts; bare excepts are
        # deliberate best-effort (a node may pick itself / a busy peer)
        for i in self.l:
            try:
                i.addContact('127.0.0.1', self.l[randrange(0,self.num)].port)
            except:
                pass
            try:
                i.addContact('127.0.0.1', self.l[randrange(0,self.num)].port)
            except:
                pass
            try:
                i.addContact('127.0.0.1', self.l[randrange(0,self.num)].port)
            except:
                pass
            self.r.listen_once(1)
            self.r.listen_once(1)
            self.r.listen_once(1)

        # two full rounds of findCloseNodes to populate routing tables
        for i in self.l:
            self.done = 0
            i.findCloseNodes(self._done)
            while not self.done:
                self.r.listen_once(1)
        for i in self.l:
            self.done = 0
            i.findCloseNodes(self._done)
            while not self.done:
                self.r.listen_once(1)

    def tearDown(self):
        # close every node's UDP socket and drain the loop once
        for i in self.l:
            self.r.stop_listening_udp(i.socket)
            i.socket.close()

        self.r.listen_once(1)

    def testStoreRetrieve(self):
        # ten random key/value pairs, each stored via 3 random nodes and
        # fetched via 3 random nodes
        for i in range(10):
            K = khash.newID()
            V = khash.newID()

            for a in range(3):
                self.done = 0
                def _scb(val):
                    self.done = 1
                self.l[randrange(0, self.num)].storeValueForKey(K, V, _scb)
                while not self.done:
                    self.r.listen_once(1)


            # retrieval callback: empty result list signals end-of-results;
            # by then the value must have been seen (self.got)
            def _rcb(val):
                if not val:
                    self.done = 1
                    self.assertEqual(self.got, 1)
                elif V in val:
                    self.got = 1
            for x in range(3):
                self.got = 0
                self.done = 0
                self.l[randrange(0, self.num)].valueForKey(K, _rcb)
                while not self.done:
                    self.r.listen_once(1)
|
||||
|
||||
class AASimpleTests(TestCase):
    """Two-node integration tests (named AA* to sort/run first).

    NOTE(review): `sha` is presumably in scope via a wildcard import above —
    confirm.
    """
    def setUp(self):
        d = dict([(x[0],x[1]) for x in common_options + rare_options])
        self.r = RawServer(Event(), d)
        self.a = Khashmir('127.0.0.1', 4044, '/tmp/a.test', self.r)
        self.b = Khashmir('127.0.0.1', 4045, '/tmp/b.test', self.r)

    def tearDown(self):
        self.r.stop_listening_udp(self.a.socket)
        self.r.stop_listening_udp(self.b.socket)
        self.a.socket.close()
        self.b.socket.close()

    def addContacts(self):
        # a learns about b; pump the loop so the handshake completes
        self.a.addContact('127.0.0.1', 4045)
        self.r.listen_once(1)
        self.r.listen_once(1)

    def testStoreRetrieve(self):
        self.addContacts()
        self.got = 0
        self.a.storeValueForKey(sha('foo').digest(), 'foobar')
        # pump enough loop iterations for the store round-trips to finish
        self.r.listen_once(1)
        self.r.listen_once(1)
        self.r.listen_once(1)
        self.r.listen_once(1)
        self.r.listen_once(1)
        self.r.listen_once(1)
        self.a.valueForKey(sha('foo').digest(), self._cb)
        self.r.listen_once(1)
        self.r.listen_once(1)
        self.r.listen_once(1)
        self.r.listen_once(1)

    def _cb(self, val):
        # empty val = end of results; the value must have been seen by then
        if not val:
            self.assertEqual(self.got, 1)
        elif 'foobar' in val:
            self.got = 1

    def testAddContact(self):
        # both tables start with one empty bucket ...
        self.assertEqual(len(self.a.table.buckets), 1)
        self.assertEqual(len(self.a.table.buckets[0].l), 0)

        self.assertEqual(len(self.b.table.buckets), 1)
        self.assertEqual(len(self.b.table.buckets[0].l), 0)

        self.addContacts()

        # ... and each ends up knowing exactly one peer
        self.assertEqual(len(self.a.table.buckets), 1)
        self.assertEqual(len(self.a.table.buckets[0].l), 1)
        self.assertEqual(len(self.b.table.buckets), 1)
        self.assertEqual(len(self.b.table.buckets[0].l), 1)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
161
khashmir/test_krpc.py
Executable file
161
khashmir/test_krpc.py
Executable file
@ -0,0 +1,161 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
from unittest import *
|
||||
from krpc import *
|
||||
from BitTorrent.defaultargs import common_options, rare_options
|
||||
from threading import Event
|
||||
from node import Node
|
||||
|
||||
KRPC.noisy = 0
|
||||
|
||||
import sys
|
||||
|
||||
if __name__ =="__main__":
|
||||
tests = defaultTestLoader.loadTestsFromNames([sys.argv[0][:-3]])
|
||||
result = TextTestRunner().run(tests)
|
||||
|
||||
|
||||
def connectionForAddr(host, port):
    """Test shim: stand-in connection factory that just returns the host."""
    return host
|
||||
|
||||
|
||||
class Receiver(object):
    """Test fixture acting as a KRPC endpoint.

    Records every payload delivered via krpc_store in self.buf so tests can
    assert on it, and answers krpc_echo requests with the request payload.
    """
    protocol = KRPC

    def __init__(self, addr):
        host, port = addr[0], addr[1]
        # a dummy local node identity (all-zero 20-byte id) at this address
        self.node = Node().init('0' * 20, host, port)
        self.buf = []

    def krpc_store(self, msg, _krpc_sender):
        # remember the stored payload for later assertions
        self.buf.append(msg)

    def krpc_echo(self, msg, _krpc_sender):
        # echo service: reply with the payload unchanged
        return msg
|
||||
|
||||
class KRPCTests(TestCase):
    """KRPC request/response tests over two local UDP endpoints (1180/1181).

    NOTE(review): the attribute name `self.as` uses `as`, which is a
    reserved word in modern Python (2.6+) — this code predates that and
    will not parse on current interpreters.
    """
    def setUp(self):
        self.noisy = 0
        d = dict([(x[0],x[1]) for x in common_options + rare_options])
        self.r = RawServer(Event(), d)

        # endpoint A on port 1180
        addr = ('127.0.0.1', 1180)
        self.as = self.r.create_udpsocket(addr[1], addr[0], True)
        self.af = Receiver(addr)
        self.a = hostbroker(self.af, addr, self.as, self.r.add_task)
        self.r.start_listening_udp(self.as, self.a)

        # endpoint B on port 1181
        addr = ('127.0.0.1', 1181)
        self.bs = self.r.create_udpsocket(addr[1], addr[0], True)
        self.bf = Receiver(addr)
        self.b = hostbroker(self.bf, addr, self.bs, self.r.add_task)
        self.r.start_listening_udp(self.bs, self.b)

    def tearDown(self):
        self.as.close()
        self.bs.close()

    def testSimpleMessage(self):
        # one store request from A lands in B's receiver buffer
        self.noisy = 0
        self.a.connectionForAddr(('127.0.0.1', 1181)).sendRequest('store', {'msg' : "This is a test."})
        self.r.listen_once(0.01)
        self.assertEqual(self.bf.buf, ["This is a test."])

    def testMessageBlast(self):
        # warm-up round-trip, then 100 back-to-back stores must all arrive
        self.a.connectionForAddr(('127.0.0.1', 1181)).sendRequest('store', {'msg' : "This is a test."})
        self.r.listen_once(0.01)
        self.assertEqual(self.bf.buf, ["This is a test."])
        self.bf.buf = []

        for i in range(100):
            self.a.connectionForAddr(('127.0.0.1', 1181)).sendRequest('store', {'msg' : "This is a test."})
            self.r.listen_once(0.01)
        #self.bf.buf = []
        self.assertEqual(self.bf.buf, ["This is a test."] * 100)

    def testEcho(self):
        # request + response needs two loop iterations
        df = self.a.connectionForAddr(('127.0.0.1', 1181)).sendRequest('echo', {'msg' : "This is a test."})
        df.addCallback(self.gotMsg)
        self.r.listen_once(0.01)
        self.r.listen_once(0.01)
        self.assertEqual(self.msg, "This is a test.")

    def gotMsg(self, dict):
        # response callback: stash the 'rsp' payload for assertions
        _krpc_sender = dict['_krpc_sender']
        msg = dict['rsp']
        self.msg = msg

    def testManyEcho(self):
        # one warm-up echo, then 100 sequential echoes on the same connection
        df = self.a.connectionForAddr(('127.0.0.1', 1181)).sendRequest('echo', {'msg' : "This is a test."})
        df.addCallback(self.gotMsg)
        self.r.listen_once(0.01)
        self.r.listen_once(0.01)
        self.assertEqual(self.msg, "This is a test.")
        for i in xrange(100):
            self.msg = None
            df = self.a.connectionForAddr(('127.0.0.1', 1181)).sendRequest('echo', {'msg' : "This is a test."})
            df.addCallback(self.gotMsg)
            self.r.listen_once(0.01)
            self.r.listen_once(0.01)
            self.assertEqual(self.msg, "This is a test.")

    def testMultiEcho(self):
        # three different payloads echoed over one connection, in order
        self.noisy = 0
        df = self.a.connectionForAddr(('127.0.0.1', 1181)).sendRequest('echo', {'msg' : "This is a test."})
        df.addCallback(self.gotMsg)
        self.r.listen_once(0.01)
        self.r.listen_once(0.01)
        self.assertEqual(self.msg, "This is a test.")

        df = self.a.connectionForAddr(('127.0.0.1', 1181)).sendRequest('echo', {'msg' : "This is another test."})
        df.addCallback(self.gotMsg)
        self.r.listen_once(0.01)
        self.r.listen_once(0.01)
        self.assertEqual(self.msg, "This is another test.")

        df = self.a.connectionForAddr(('127.0.0.1', 1181)).sendRequest('echo', {'msg' : "This is yet another test."})
        df.addCallback(self.gotMsg)
        self.r.listen_once(0.01)
        self.r.listen_once(0.01)
        self.assertEqual(self.msg, "This is yet another test.")

    def testEchoReset(self):
        # echo twice, then drop the cached connection and echo again —
        # the broker must transparently create a fresh connection
        self.noisy = 0
        df = self.a.connectionForAddr(('127.0.0.1', 1181)).sendRequest('echo', {'msg' : "This is a test."})
        df.addCallback(self.gotMsg)
        self.r.listen_once(0.01)
        self.r.listen_once(0.01)
        self.assertEqual(self.msg, "This is a test.")

        df = self.a.connectionForAddr(('127.0.0.1', 1181)).sendRequest('echo', {'msg' : "This is another test."})
        df.addCallback(self.gotMsg)
        self.r.listen_once(0.01)
        self.r.listen_once(0.01)
        self.assertEqual(self.msg, "This is another test.")

        del(self.a.connections[('127.0.0.1', 1181)])
        df = self.a.connectionForAddr(('127.0.0.1', 1181)).sendRequest('echo', {'msg' : "This is yet another test."})
        df.addCallback(self.gotMsg)
        self.r.listen_once(0.01)
        self.r.listen_once(0.01)
        self.assertEqual(self.msg, "This is yet another test.")

    def testLotsofEchoReset(self):
        # stress the reset path
        for i in range(100):
            self.testEchoReset()

    def testUnknownMeth(self):
        # an unknown method must produce the METHOD_UNKNOWN KRPC error
        self.noisy = 0
        df = self.a.connectionForAddr(('127.0.0.1', 1181)).sendRequest('blahblah', {'msg' : "This is a test."})
        df.addErrback(self.gotErr)
        self.r.listen_once(0.01)
        self.r.listen_once(0.01)
        self.assertEqual(self.err, KRPC_ERROR_METHOD_UNKNOWN)

    def gotErr(self, err):
        # error callback: stash the error for assertions
        self.err = err
|
||||
|
91
khashmir/test_kstore.py
Executable file
91
khashmir/test_kstore.py
Executable file
@ -0,0 +1,91 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
import unittest
|
||||
from BitTorrent.platform import bttime
|
||||
from time import sleep
|
||||
|
||||
from kstore import KStore
|
||||
# self-test entry point (module re-imported by name by the loader)
if __name__ =="__main__":
    tests = unittest.defaultTestLoader.loadTestsFromNames(['test_kstore'])
    result = unittest.TextTestRunner().run(tests)
|
||||
|
||||
|
||||
class BasicTests(unittest.TestCase):
    """Unit tests for KStore: a multimap with per-entry insertion-time
    expiry (k[key] = value appends; k[key] returns a list of values)."""
    def setUp(self):
        self.k = KStore()

    def testNoKeys(self):
        self.assertEqual(self.k.keys(), [])

    def testKey(self):
        self.k['foo'] = 'bar'
        self.assertEqual(self.k.keys(), ['foo'])

    def testKeys(self):
        self.k['foo'] = 'bar'
        self.k['wing'] = 'wang'
        l = self.k.keys()
        l.sort()
        self.assertEqual(l, ['foo', 'wing'])

    def testInsert(self):
        # a single insert reads back as a one-element list
        self.k['foo'] = 'bar'
        self.assertEqual(self.k['foo'], ['bar'])

    def testInsertTwo(self):
        # assignment appends rather than replaces
        self.k['foo'] = 'bar'
        self.k['foo'] = 'bing'
        l = self.k['foo']
        l.sort()
        self.assertEqual(l, ['bar', 'bing'])

    def testExpire(self):
        # expire(cutoff) drops only entries inserted at or before the cutoff
        self.k['foo'] = 'bar'
        self.k.expire(bttime() - 1)
        l = self.k['foo']
        l.sort()
        self.assertEqual(l, ['bar'])
        self.k['foo'] = 'bing'
        t = bttime()
        self.k.expire(bttime() - 1)
        l = self.k['foo']
        l.sort()
        self.assertEqual(l, ['bar', 'bing'])
        self.k['foo'] = 'ding'
        self.k['foo'] = 'dang'
        l = self.k['foo']
        l.sort()
        self.assertEqual(l, ['bar', 'bing', 'dang', 'ding'])
        # cutoff t was taken before ding/dang were inserted: they survive
        self.k.expire(t)
        l = self.k['foo']
        l.sort()
        self.assertEqual(l, ['dang', 'ding'])

    def testDup(self):
        # duplicate values are stored once
        self.k['foo'] = 'bar'
        self.k['foo'] = 'bar'
        self.assertEqual(self.k['foo'], ['bar'])

    def testSample(self):
        # sample(key, n) returns at most n distinct stored values
        for i in xrange(2):
            self.k['foo'] = i
        l = self.k.sample('foo', 5)
        l.sort()
        self.assertEqual(l, [0, 1])

        # NOTE(review): the inner loop reuses `i`, shadowing the outer loop
        # variable — harmless here but confusing
        for i in xrange(10):
            for i in xrange(10):
                self.k['bar'] = i
        l = self.k.sample('bar', 5)
        self.assertEqual(len(l), 5)
        # all sampled values must be distinct
        for i in xrange(len(l)):
            self.assert_(l[i] not in l[i+1:])
|
||||
|
84
khashmir/unet.py
Executable file
84
khashmir/unet.py
Executable file
@ -0,0 +1,84 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
#
|
||||
# knet.py
|
||||
# create a network of khashmir nodes
|
||||
# usage: knet.py <num_nodes> <start_port> <ip_address>
|
||||
|
||||
from utkhashmir import UTKhashmir
|
||||
from BitTorrent.RawServer_magic import RawServer
|
||||
from BitTorrent.defaultargs import common_options, rare_options
|
||||
from random import randrange
|
||||
from threading import Event
|
||||
import sys, os
|
||||
|
||||
from krpc import KRPC
|
||||
KRPC.noisy = 1
|
||||
|
||||
class Network:
    """Build a local network of UTKhashmir nodes for manual testing.

    usage (as a script): knet.py <num_nodes> <start_port> <ip_address>
    """
    def __init__(self, size=0, startport=5555, localip='127.0.0.1'):
        self.num = size
        self.startport = startport
        self.localip = localip

    def _done(self, val):
        # completion callback for findCloseNodes
        self.done = 1

    def simpleSetUp(self):
        #self.kfiles()
        d = dict([(x[0],x[1]) for x in common_options + rare_options])
        self.r = RawServer(Event(), d)
        self.l = []
        # one node per consecutive port, each with its own db file
        for i in range(self.num):
            self.l.append(UTKhashmir('', self.startport + i, 'kh%s.db' % (self.startport + i), self.r))

        # seed each node with three random peers and pump the loop
        for i in self.l:
            i.addContact(self.localip, self.l[randrange(0,self.num)].port)
            i.addContact(self.localip, self.l[randrange(0,self.num)].port)
            i.addContact(self.localip, self.l[randrange(0,self.num)].port)
            self.r.listen_once(1)
            self.r.listen_once(1)
            self.r.listen_once(1)

        # two rounds of findCloseNodes to fill routing tables
        for i in self.l:
            self.done = 0
            i.findCloseNodes(self._done)
            while not self.done:
                self.r.listen_once(1)
        for i in self.l:
            self.done = 0
            i.findCloseNodes(self._done)
            while not self.done:
                self.r.listen_once(1)

    def tearDown(self):
        for i in self.l:
            i.rawserver.stop_listening_udp(i.socket)
            i.socket.close()
        #self.kfiles()

    def kfiles(self):
        """Delete the per-node db files (best-effort)."""
        for i in range(self.startport, self.startport+self.num):
            try:
                os.unlink('kh%s.db' % i)
            except:
                pass

        # NOTE(review): this assumes self.r already exists (i.e. kfiles is
        # called after simpleSetUp) — confirm intended call order
        self.r.listen_once(1)
|
||||
|
||||
# script entry: build the network, then serve forever; always tear down
# (Python 2 print statement — do not modernize in isolation)
if __name__ == "__main__":
    n = Network(int(sys.argv[1]), int(sys.argv[2]))
    n.simpleSetUp()
    print ">>> network ready"
    try:
        n.r.listen_forever()
    finally:
        n.tearDown()
|
69
khashmir/util.py
Executable file
69
khashmir/util.py
Executable file
@ -0,0 +1,69 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
from struct import pack, unpack
|
||||
|
||||
def bucket_stats(l):
    """Given a list of khashmir instances, return the min, max and average
    number of nodes held across their routing tables.

    Returns {'min': int, 'max': int, 'avg': number}.  Raises
    ZeroDivisionError / ValueError for an empty list, as before.

    Rewritten to use the builtin min()/max()/sum() instead of hand-rolled
    loops; the original also shadowed the builtins `min` and `max` with
    locals, which this version avoids.
    """
    def _count(buckets):
        # total nodes across all buckets of one routing table
        return sum(len(bucket.l) for bucket in buckets)
    counts = [_count(node.table.buckets) for node in l]
    return {'min': min(counts), 'max': max(counts), 'avg': sum(counts) / len(l)}
|
||||
|
||||
def compact_peer_info(ip, port):
    """Pack a dotted-quad IP string and a port into the 6-byte compact
    peer wire format (4 network-order octets + unsigned short port)."""
    fields = [int(octet) for octet in ip.split('.')]
    fields.append(port)
    return pack('!BBBBH', *fields)
|
||||
|
||||
def packPeers(peers):
    """Compact-encode a sequence of (ip, port, ...) peer tuples; returns a
    list of 6-byte compact peer strings."""
    # only the first two fields of each peer tuple are used
    return [compact_peer_info(peer[0], peer[1]) for peer in peers]
|
||||
|
||||
def reducePeers(peers):
    """Concatenate a list of compact peer strings into a single string.

    ''.join() is linear in the total length, whereas the previous
    reduce(lambda a, b: a + b, peers, '') form was quadratic; it also works
    unchanged on Python 3, where reduce() is no longer a builtin.
    """
    return ''.join(peers)
|
||||
|
||||
def unpackPeers(p):
    """Decode peers from either wire format into (ip, port, peer_id) tuples.

    A string is treated as concatenated 6-byte compact entries (peer id
    unknown -> None); otherwise *p* is assumed to be a list of dicts with
    'ip'/'port' and optional 'peer id' keys.  Python 2 byte-string
    semantics (ord over str slices).
    """
    peers = []
    if type(p) == type(''):
        # compact format: 4 IP octets + 2-byte big-endian port per entry
        for x in xrange(0, len(p), 6):
            ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
            port = unpack('!H', p[x+4:x+6])[0]
            peers.append((ip, port, None))
    else:
        # dict format (non-compact tracker-style response)
        for x in p:
            peers.append((x['ip'], x['port'], x.get('peer id')))
    return peers
|
||||
|
||||
|
||||
def compact_node_info(id, ip, port):
    """26-byte compact node entry: 20-byte node id + 6-byte compact peer."""
    return id + compact_peer_info(ip, port)
|
||||
|
||||
def packNodes(nodes):
    """Concatenate node dicts ('id'/'host'/'port') into compact node wire
    format (26 bytes per node)."""
    return ''.join([compact_node_info(x['id'], x['host'], x['port']) for x in nodes])
|
||||
|
||||
def unpackNodes(n):
    """Inverse of packNodes: split a compact node string into a list of
    {'id', 'host', 'port'} dicts (26 bytes per entry)."""
    nodes = []
    for x in xrange(0, len(n), 26):
        id = n[x:x+20]                                      # 20-byte node id
        ip = '.'.join([str(ord(i)) for i in n[x+20:x+24]])  # 4 IP octets
        port = unpack('!H', n[x+24:x+26])[0]                # big-endian port
        nodes.append({'id':id, 'host':ip, 'port': port})
    return nodes
|
218
khashmir/utkhashmir.py
Executable file
218
khashmir/utkhashmir.py
Executable file
@ -0,0 +1,218 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
import khashmir, knode
|
||||
from actions import *
|
||||
from khash import newID
|
||||
from krpc import KRPCProtocolError, KRPCFailSilently
|
||||
from cache import Cache
|
||||
from sha import sha
|
||||
from util import *
|
||||
from threading import Thread
|
||||
from socket import gethostbyname
|
||||
from const import *
|
||||
from kstore import sample
|
||||
|
||||
# seconds between write-token rotations and token-cache expiry sweeps
TOKEN_UPDATE_INTERVAL = 5 * 60 # five minutes
NUM_PEERS = 50 # number of peers to return
|
||||
|
||||
class UTNode(knode.KNodeBase):
    """Remote-node proxy speaking the uTorrent-style DHT extensions
    (announce_peer / get_peers with write tokens)."""

    def announcePeer(self, info_hash, port, khashmir_id):
        """Send announce_peer for *info_hash* using the cached write token.

        Raises KRPCProtocolError if we hold no token for this node (a token
        is obtained as a side effect of an earlier get_peers response).
        Returns the request deferred.
        """
        assert type(port) == type(1)
        assert type(info_hash) == type('')
        assert type(khashmir_id) == type('')
        assert len(info_hash) == 20
        assert len(khashmir_id) == 20

        try:
            # bare except is deliberate best-effort: missing entry or an
            # expired cache slot both mean "no token"
            token = self.table.tcache[self.id]
        except:
            token = None
        if token:
            assert type(token) == type("")
            assert len(token) == 20
            df = self.conn().sendRequest('announce_peer', {'info_hash':info_hash,
                                                           'port':port,
                                                           'id':khashmir_id,
                                                           'token':token})
        else:
            raise KRPCProtocolError("no write token for node")
        df.addErrback(self.errBack)
        df.addCallback(self.checkSender)
        return df

    def getPeers(self, info_hash, khashmir_id):
        """Send get_peers for *info_hash*; returns the request deferred."""
        df = self.conn().sendRequest('get_peers', {'info_hash':info_hash, 'id':khashmir_id})
        df.addErrback(self.errBack)
        df.addCallback(self.checkSender)
        return df

    def checkSender(self, dict):
        """Validate the response sender, then opportunistically cache any
        write token the response carries (keyed by the sender's id)."""
        d = knode.KNodeBase.checkSender(self, dict)
        try:
            self.table.tcache[d['rsp']['id']] = d['rsp']['token']
        except KeyError:
            # response carried no token; nothing to cache
            pass
        return d
|
||||
|
||||
class UTStoreValue(StoreValue):
    """StoreValue action variant that passes the per-node write token and
    our own node id along with the value."""
    def callNode(self, node, f):
        # f is the node's store method (e.g. announcePeer)
        return f(self.target, self.value, node.token, self.table.node.id)
|
||||
|
||||
class UTKhashmir(khashmir.KhashmirBase):
|
||||
_Node = UTNode
|
||||
|
||||
def setup(self, host, port, data_dir, rlcount, checkpoint=True):
|
||||
khashmir.KhashmirBase.setup(self, host, port,data_dir, rlcount, checkpoint)
|
||||
self.cur_token = self.last_token = sha('')
|
||||
self.tcache = Cache()
|
||||
self.gen_token(loop=True)
|
||||
self.expire_cached_tokens(loop=True)
|
||||
|
||||
def expire_cached_tokens(self, loop=False):
|
||||
self.tcache.expire(time() - TOKEN_UPDATE_INTERVAL)
|
||||
if loop:
|
||||
self.rawserver.external_add_task(self.expire_cached_tokens, TOKEN_UPDATE_INTERVAL, (True,))
|
||||
|
||||
def gen_token(self, loop=False):
|
||||
self.last_token = self.cur_token
|
||||
self.cur_token = sha(newID())
|
||||
if loop:
|
||||
self.rawserver.external_add_task(self.gen_token, TOKEN_UPDATE_INTERVAL, (True,))
|
||||
|
||||
def get_token(self, host, port):
|
||||
x = self.cur_token.copy()
|
||||
x.update("%s%s" % (host, port))
|
||||
h = x.digest()
|
||||
return h
|
||||
|
||||
|
||||
def val_token(self, token, host, port):
|
||||
x = self.cur_token.copy()
|
||||
x.update("%s%s" % (host, port))
|
||||
a = x.digest()
|
||||
if token == a:
|
||||
return True
|
||||
|
||||
x = self.last_token.copy()
|
||||
x.update("%s%s" % (host, port))
|
||||
b = x.digest()
|
||||
if token == b:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def addContact(self, host, port, callback=None):
|
||||
# use dns on host, then call khashmir.addContact
|
||||
Thread(target=self._get_host, args=[host, port, callback]).start()
|
||||
|
||||
def _get_host(self, host, port, callback):
|
||||
ip = gethostbyname(host)
|
||||
self.rawserver.external_add_task(self._got_host, 0, (host, port, callback))
|
||||
|
||||
def _got_host(self, host, port, callback):
|
||||
khashmir.KhashmirBase.addContact(self, host, port, callback)
|
||||
|
||||
def announcePeer(self, info_hash, port, callback=None):
    """Store our listen *port* under *info_hash* in the global table.

    Returns immediately with no status: in this implementation peers
    respond but do not report storage status, and one key may hold
    many values.
    """
    def _store(nodes, key=info_hash, value=port, response=callback, table=self.table):
        # fall back to a do-nothing handler when no callback was supplied
        handler = response or (lambda sender: None)
        action = UTStoreValue(self, key, value, handler,
                              self.rawserver.add_task, "announcePeer")
        self.rawserver.external_add_task(action.goWithNodes, 0, (nodes,))

    # findNode is asynchronous; _store fires once the closest nodes are known
    self.findNode(info_hash, _store)
def krpc_announce_peer(self, info_hash, port, id, token, _krpc_sender):
    """KRPC handler: a peer asks us to store its *port* under *info_hash*.

    Rejects the request unless *token* was issued by us to this exact
    host/port (see get_token/val_token); otherwise records the compact
    peer info and folds the sender into the routing table.
    """
    host, sender_port = _krpc_sender[0], _krpc_sender[1]
    sender = {'id': id, 'host': host, 'port': sender_port}
    if not self.val_token(token, host, sender_port):
        raise KRPCProtocolError("Invalid Write Token")
    # store the announced listen port, paired with the sender's address
    self.store[info_hash] = compact_peer_info(_krpc_sender[0], port)
    node = self.Node().initWithDict(sender)
    self.insertNode(node, contacted=0)
    return {"id": self.node.id}
def retrieveValues(self, key):
    """Return up to NUM_PEERS locally stored values for *key*.

    Returns an empty list when nothing is stored under *key*.
    """
    try:
        return self.store.sample(key, NUM_PEERS)
    except KeyError:
        return []
def getPeers(self, info_hash, callback, searchlocal = 1):
    """Search the global table for peers under *info_hash*.

    *callback* is invoked with a list of values for each peer that
    returns unique ones; the final invocation passes an empty list
    (probably should become a 'more coming' flag instead).
    """
    nodes = self.table.findNodes(info_hash, invalid=True)
    stale = [node for node in nodes if node.invalid]
    if len(stale) > 4:
        # too many stale entries: mix 4 random stale nodes with 4 fresh ones
        nodes = sample(stale, 4) + self.table.findNodes(info_hash, invalid=False)[:4]

    # check the local store first
    local_peers = self.retrieveValues(info_hash) if searchlocal else []
    if local_peers:
        self.rawserver.external_add_task(callback, 0, ([reducePeers(local_peers)],))

    # kick off the iterative network search
    state = GetValue(self, info_hash, callback, self.rawserver.add_task, 'getPeers')
    self.rawserver.external_add_task(state.goWithNodes, 0, (nodes, local_peers))
def getPeersAndAnnounce(self, info_hash, port, callback, searchlocal = 1):
    """Search for peers under *info_hash* and announce our *port* to them.

    *callback* is invoked with a list of values for each peer that
    returns unique ones; the final invocation passes an empty list
    (probably should become a 'more coming' flag instead).
    """
    # query fresh nodes first, then stale ones
    nodes = self.table.findNodes(info_hash, invalid=False)
    nodes = nodes + self.table.findNodes(info_hash, invalid=True)

    # check the local store first
    local_peers = self.retrieveValues(info_hash) if searchlocal else []
    if local_peers:
        self.rawserver.external_add_task(callback, 0, ([reducePeers(local_peers)],))

    # combined search-and-store state; identity hook for the store callback
    identity = lambda a: a
    state = GetAndStore(self, info_hash, port, callback, identity,
                        self.rawserver.add_task, 'getPeers', "announcePeer")
    self.rawserver.external_add_task(state.goWithNodes, 0, (nodes, local_peers))
def krpc_get_peers(self, info_hash, id, _krpc_sender):
    """KRPC handler: a peer asks for peers under *info_hash*.

    Folds the sender into the routing table, then answers with either
    locally stored values or the closest known good nodes; either way
    a write token for the sender's address is included.
    """
    host, port = _krpc_sender[0], _krpc_sender[1]
    sender = {'id': id, 'host': host, 'port': port}
    node = self.Node().initWithDict(sender)
    self.insertNode(node, contacted=0)

    values = self.retrieveValues(info_hash)
    reply = {"id": self.node.id,
             "token": self.get_token(host, port)}
    if len(values) > 0:
        reply['values'] = [reducePeers(values)]
    else:
        # nothing stored locally: hand back the closest good nodes instead
        good = self.table.findNodes(info_hash, invalid=False)
        reply['nodes'] = packNodes([n.senderDict() for n in good])
    return reply
BIN
locale/af/LC_MESSAGES/bittorrent.mo
Executable file
BIN
locale/af/LC_MESSAGES/bittorrent.mo
Executable file
Binary file not shown.
2749
locale/af/LC_MESSAGES/bittorrent.po
Executable file
2749
locale/af/LC_MESSAGES/bittorrent.po
Executable file
File diff suppressed because it is too large
Load Diff
BIN
locale/bg/LC_MESSAGES/bittorrent.mo
Executable file
BIN
locale/bg/LC_MESSAGES/bittorrent.mo
Executable file
Binary file not shown.
2875
locale/bg/LC_MESSAGES/bittorrent.po
Executable file
2875
locale/bg/LC_MESSAGES/bittorrent.po
Executable file
File diff suppressed because it is too large
Load Diff
BIN
locale/ca/LC_MESSAGES/bittorrent.mo
Executable file
BIN
locale/ca/LC_MESSAGES/bittorrent.mo
Executable file
Binary file not shown.
2930
locale/ca/LC_MESSAGES/bittorrent.po
Executable file
2930
locale/ca/LC_MESSAGES/bittorrent.po
Executable file
File diff suppressed because it is too large
Load Diff
BIN
locale/cs/LC_MESSAGES/bittorrent.mo
Executable file
BIN
locale/cs/LC_MESSAGES/bittorrent.mo
Executable file
Binary file not shown.
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user