getscrapedata.py :  » Network » Torrent-Swapper » swapper » Utility » Python Open Source

Home
Python Open Source
1.3.1.2 Python
2.Ajax
3.Aspect Oriented
4.Blog
5.Build
6.Business Application
7.Chart Report
8.Content Management Systems
9.Cryptographic
10.Database
11.Development
12.Editor
13.Email
14.ERP
15.Game 2D 3D
16.GIS
17.GUI
18.IDE
19.Installer
20.IRC
21.Issue Tracker
22.Language Interface
23.Log
24.Math
25.Media Sound Audio
26.Mobile
27.Network
28.Parser
29.PDF
30.Project Management
31.RSS
32.Search
33.Security
34.Template Engines
35.Test
36.UML
37.USB Serial
38.Web Frameworks
39.Web Server
40.Web Services
41.Web Unit
42.Wiki
43.Windows
44.XML
Python Open Source » Network » Torrent Swapper 
Torrent Swapper » swapper » Utility » getscrapedata.py
import re
import binascii
import sys
from threading import Thread,Event
from safeguiupdate import DelayedEventHandler

# The ScrapeThread calls ABCTorrent to update the info. As that updates
# the GUI, those updates must be done by the MainThread and not this
# scraping thread itself.


# Module-wide debug switch: when True, progress messages are printed to
# stdout at each stage of the scrape (Python 2 `print` statements below).
DEBUG = False

################################################################
#
# Class: ScrapeThread
#
# Retrieves scrape data from a tracker.
#
################################################################
class ScrapeThread(Thread,DelayedEventHandler):
    """Background thread that fetches scrape data (seed/peer counts) for
    a single torrent from its tracker.

    Because the result must be displayed in the GUI, the final update is
    marshalled back to the main thread via DelayedEventHandler.invokeLater
    (see updateTorrent/OnUpdateTorrent) instead of touching widgets from
    this worker thread directly.
    """

    def __init__(self, utility, torrent, manualscrape = False):
        # utility:      application helper; provides lang.get() for UI
        #               strings and getMetainfo() for fetching/bdecoding
        #               the scrape URL.
        # torrent:      the torrent to scrape; expected to expose
        #               .metainfo, .infohash (hex string) and
        #               .updateScrapeData(peers, seeds, status).
        # manualscrape: True when the user explicitly requested the
        #               scrape; only then is the status text shown
        #               (see OnUpdateTorrent).
        Thread.__init__(self, None, None, None)
        DelayedEventHandler.__init__(self)
        self.doneflag = Event()

        self.torrent = torrent
        self.utility = utility
        self.manualscrape = manualscrape
        # Initial status while the scrape is in progress.
        self.status = self.utility.lang.get('scraping')
        # Seed/peer counts default to "?" and stay that way on any error.
        self.currentseed = "?"
        self.currentpeer = "?"

        # Prefix the auto-assigned thread name for easier debugging.
        self.setName( "Scrape"+self.getName() )


    def run(self):
        # Thread entry point: perform one scrape and exit.
        self.GetScrapeData()

    def GetScrapeData(self):
        """Build the tracker scrape URL from the announce URL, fetch it,
        and store seed/peer counts in self.currentseed/self.currentpeer
        (left as '?' on any failure). Always schedules a GUI update."""
        if DEBUG:
            print "scrapethread: scraping..."

        # connect scrape at tracker and get data
        # save at self.currentpeer, self.currentseed
        # if error put '?'

        # First update shows the "scraping" status in the list right away.
        self.updateTorrent()

        metainfo = self.torrent.metainfo

        if metainfo is None:
            self.status = self.utility.lang.get('cantreadmetainfo')
            self.updateTorrent()
            return

        if DEBUG:
            print "scrapethread: got metainfo"

        # Pick the announce URL: prefer the single 'announce' key, else
        # fall back to the first tracker of the first tier in
        # 'announce-list' (BEP 12 multitracker extension).
        announce = None
        if 'announce' in metainfo:
            announce = metainfo['announce']
        elif 'announce-list' in metainfo:
            announce_list = metainfo['announce-list']
            announce = announce_list[0][0]

        if announce is None:
            self.status = self.utility.lang.get('noannouncetrackerinmeta')
            self.updateTorrent()
            return

        if DEBUG:
            print "scrapethread: got announce"

#        sys.stdout.write('Announce URL: ' + announce + '\n');

        # Does tracker support scraping?  By convention a tracker supports
        # scrape only if the last path component of the announce URL
        # begins with "announce" (it is replaced by "scrape" below).
        ix = announce.rfind('/')
        if ((ix == -1) or (announce.rfind("/announce") != ix)):
            # Tracker doesn't support scraping
            self.status = self.utility.lang.get('trackernoscrape')
            self.updateTorrent()
            return

        # Strip the last path component, keeping the trailing '/'
        # (e.g. "http://t/announce?x" -> "http://t/").
        p = re.compile('(.*/)[^/]+')
        surl = p.sub(r'\1', announce)
        #sys.stdout.write('sURL1: ' + surl + '\n')
        #Fix this to comply with scrape standards.
        ix = announce.rindex('/')
        #tmp = 'ix: '.join(ix)
        #sys.stdout.write('ix: ' + str(ix) + '\n')
        # ix2 = index just past the "announce" token ("/announce" is 9
        # chars), clamped to the end of the string.
        if (ix + 9) > len(announce):
            ix2 = len(announce)
        else:
            ix2 = ix + 9
        #sys.stdout.write('ix: ' + announce[(ix + 1):(ix2)] + '\n')
        # NOTE(review): this endswith check appears always true here — the
        # guard above already required the last '/' to start "/announce".
        if announce[(ix + 1):(ix2)].endswith("announce", 0):
            #sys.stdout.write('!!!VALID SCRAPE URL!!!' + '\n')
            #sys.stdout.write('sURLTrue: ' + surl + 'scrape' + announce[(ix2):] + '\n');
            # fix for some private trackers (change ? to &): if the suffix
            # after "announce" already contains a query string, append the
            # info_hash with '&' instead of starting a new one with '?'.
            if '?' in announce[ix2:]:
                infohashprefix = '&'
            else:
                infohashprefix = '?'
            surl = surl + 'scrape' + announce[ix2:] + infohashprefix + 'info_hash='
        #end new Scrape URL Code
        # Percent-encode the info hash: insert '%' before every hex byte
        # pair (two hex digits) of the hex-encoded hash.
        info_hash_hex = self.torrent.infohash
        hashlen = len(info_hash_hex)
        for i in range(0, hashlen):
            if (i % 2 == 0):
                surl = surl + "%"
            surl = surl + info_hash_hex[i]

        if DEBUG:
            print "scrapethread: tring to scrape"

        # Fetch and bdecode the scrape URL (blocking network call).
        scrapedata = self.utility.getMetainfo(surl, style = "url")

        if scrapedata is None or not 'files' in scrapedata:
            self.status = self.utility.lang.get('cantgetdatafromtracker')
        else:
            # 'files' maps raw (binary) info hashes to stats dicts with
            # 'complete' (seeds) and 'incomplete' (peers) counts; compare
            # against our hash by hex-encoding each key.
            scrapedata = scrapedata['files']
            for i in scrapedata.keys():
                if binascii.b2a_hex(i) == info_hash_hex:
                    self.currentpeer = str(scrapedata[i]['incomplete'])
                    self.currentseed = str(scrapedata[i]['complete'])
                    self.status = self.utility.lang.get('scrapingdone')

        # Push the final result (or error status) to the GUI.
        self.updateTorrent()

        if DEBUG:
            print "scrapethread: done scraping"

    def updateTorrent(self):
        # Schedule OnUpdateTorrent to run on the main (GUI) thread; this
        # worker thread must not update widgets itself.
        self.invokeLater(self.OnUpdateTorrent, [])


    def OnUpdateTorrent(self):
        """Runs on the main thread: push current peer/seed counts and
        (for manual scrapes only) the status text into the torrent's
        list entry."""
        if not self.manualscrape:
            # Don't update status information if doing an automatic scrape
            status = ""
        else:
            status = self.status

        # The thread itself will update the list for its scraping infos
        self.torrent.updateScrapeData(self.currentpeer, self.currentseed, status)
www.java2java.com | Contact Us
Copyright 2009 - 12 Demo Source and Support. All rights reserved.
All other trademarks are property of their respective owners.