# Torrent metasearch scrapers (Python 2): each provider fetches a search
# results feed or page and normalizes every hit into a dict with 'url',
# 'name', 'seeds', and 'leechers' keys.
import re
import socket

from urllib2 import urlopen

from BeautifulSoup import BeautifulSoup, BeautifulStoneSoup

# Trackers can be slow or unresponsive; don't let a request block forever.
socket.setdefaulttimeout(15)


class Search:
    """Interface shared by all providers: construct, then call search()."""

    def __init__(self):
        raise NotImplementedError

    def search(self, terms):
        raise NotImplementedError


class BTJunkie(Search):
    def __init__(self):
        self.search_uri = 'http://btjunkie.org/rss.xml?query=%s&o=52'

    def search(self, terms):
        torrents = []
        url = self.search_uri % '+'.join(terms.split(' '))
        f = urlopen(url)
        soup = BeautifulStoneSoup(f.read())
        for item in soup.findAll('item'):
            # Feed titles look like "Some Name [seeds/leechers]".
            matches = re.findall(r'(.*?)\s+\[(\d+)/(\d+)\]$', item.title.text)
            if not matches:
                continue  # title carries no seed/leech counts; skip it
            (name, seeds, leechers) = matches[0]
            torrents.append({
                'url': item.enclosure['url'],
                'name': name,
                'seeds': int(seeds),
                'leechers': int(leechers),
            })
        return torrents


class Mininova(Search):
    def __init__(self):
        self.search_uri = 'http://www.mininova.org/rss/%s'

    def search(self, terms):
        torrents = []
        url = self.search_uri % '+'.join(terms.split(' '))
        f = urlopen(url)
        soup = BeautifulStoneSoup(f.read())
        for item in soup.findAll('item'):
            # Item descriptions embed "Ratio: N seeds, M leechers".
            matches = re.findall(r'Ratio: (\d+) seeds, (\d+) leechers', item.description.text)
            if not matches:
                continue  # no ratio line; skip the item
            (seeds, leechers) = matches[0]
            torrents.append({
                'url': item.enclosure['url'],
                'name': item.title.text,
                'seeds': int(seeds),
                'leechers': int(leechers),
            })
        return torrents


class TPB(Search):
    def __init__(self):
        self.search_uri = 'http://thepiratebay.org/search/%s/'

    def search(self, terms):
        torrents = []
        url = self.search_uri % '+'.join(terms.split(' '))
        f = urlopen(url)
        soup = BeautifulSoup(f.read())
        # No feed here, so scrape the HTML result table: each hit's name is a
        # "detLink" anchor, followed in the row by the .torrent link and the
        # seeder/leecher cells.
        for details in soup.findAll('a', {'class': 'detLink'}):
            name = details.text
            torrent_url = details.findNext('a', {'href': re.compile(r'^http://torrents\.thepiratebay\.org/')})['href']
            td = details.findNext('td')
            seeds = int(td.text)
            td = td.findNext('td')
            leechers = int(td.text)
            torrents.append({
                'url': torrent_url,
                'name': name,
                'seeds': seeds,
                'leechers': leechers,
            })
        return torrents
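

# A possible convenience layer, not part of the original script: run one
# query against every provider and merge whatever comes back. The name
# search_all and the default provider tuple are illustrative choices; network
# failures are skipped so a single dead tracker doesn't sink the whole query.
def search_all(terms, providers=(BTJunkie, Mininova, TPB)):
    results = []
    for provider in providers:
        try:
            results.extend(provider().search(terms))
        except IOError:
            # urllib2.URLError and socket timeouts both subclass IOError.
            continue
    return results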


if __name__ == '__main__':
    s = TPB()
    results = s.search('zettai')
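    # Illustrative output only, added so the demo shows its results: print
    # the scraped fields, best-seeded first.
    for t in sorted(results, key=lambda r: r['seeds'], reverse=True):
        print '%6d seeds  %6d leechers  %s' % (t['seeds'], t['leechers'], t['name'])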