Re: Kartina and XBMC (windows, linux, mac; does not work on xbox)
Will the plugin keep being updated, and what about support for Boxee and Plex?
# -*- coding: utf-8 -*-
# from BeautifulSoup import BeautifulSoup
import elementtree.ElementTree
from xml.sax.saxutils import unescape
import re
import os
import xbmc
import xbmcutils.net
import cookielib
import urllib
import urllib2
from threading import Thread
import pickle

# Kartina.TV account credentials (replace with your own)
userid = "xxxxxx"
userpwd = "xxxxxx"


class threadDownloadURL(Thread):
    """Downloads a channel thumbnail into the XBMC thumbnail cache in a background thread."""

    def __init__(self, url, base):
        Thread.__init__(self)
        self.url = url
        # BASE_CACHE_PATH = os.path.join( "P:\\", "Thumbnails", "Video" )
        filename = xbmc.getCacheThumbName(self.url)
        self.filepath = xbmc.translatePath(os.path.join(base, filename[0], filename))
        bd = os.path.dirname(self.filepath)
        if not os.path.exists(bd):
            os.makedirs(bd)

    def run(self):
        try:
            if not os.path.isfile(self.filepath):
                urllib.urlretrieve(self.url, self.filepath)
        except:
            # drop a partially downloaded file
            if os.path.isfile(self.filepath):
                os.remove(self.filepath)


class ParseError(Exception):
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


class KTVHarvester:
    """Fetches the Kartina.TV channel list and resolves stream URLs for the plugin."""

    DIR = 0
    VIDEO = 1
    base_url = "http://iptv.kartina.tv"
    base_headers = [
        ('Accept-Encoding', 'gzip, deflate'),
        ('Accept-Language', 'en-us'),
        ('Connection', 'keep-alive'),
        ('Accept', '*/*'),
        ('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'),
        ('Keep-Alive', '300'),
        ('Referer', 'http://iptv.kartina.tv/'),
        ('User-agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_4; en-us) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.20.1')
    ]
    post_base = "act=login&code_login=%s&code_pass=%s"
    stream_base = base_url + "/?protect_code=%s&m=channels&act=get_stream_url&cid=%s"
    list_url = base_url + "/?m=channels&act=get_list_xml"
    # thumb_base = base_url+"/img/ico/24/%s.gif"
    thumb_base = "http://www.kartina.tv/images/icons/c...s/%s.gif"  # URL truncated in the forum post
    cj = cookielib.LWPCookieJar()
    COOKIEFILE = 'cookies.lwp'

    def __init__(self, bp):
        self.base_path = bp
        self.flash_pattern = re.compile('http://[^"]+')
        self.cache_path = os.path.join(self.base_path, "data")
        if not os.path.exists(self.cache_path):
            os.mkdir(self.cache_path)
        # install a cookie-aware opener so the session cookie survives between requests
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
        opener.addheaders = self.base_headers
        urllib2.install_opener(opener)
        self.cfile = os.path.join(self.cache_path, self.COOKIEFILE)
        if os.path.isfile(self.cfile):
            self.cj.load(self.cfile)

    def set_report_hook(self, func, udata=None):
        # func(current, total, udata) is called once per channel group while parsing
        self.report_hook = func
        self.report_udata = udata

    def parse_list(self, data):
        """Parse the channel-list XML into (id, title, thumb, section, program, kind) tuples."""
        list = []
        tree = elementtree.ElementTree.XML(data)
        sz = len(tree)
        i = 0
        thumb_path = os.path.join(self.cache_path, "thumbs")
        for node in tree:
            # sect = node.get("title")
            # if sect == 'Радио':
            sect = node.get("id")
            if sect == '23':
                kind = "R"  # radio
            else:
                kind = "T"  # TV
            j = 0
            dls = []
            for chan in node:
                id = chan.get("id")
                title = chan.get("title")
                section = sect
                program = chan.get("programm")
                if program == None:
                    program = " "
                # fetch the channel icon in the background
                dl = threadDownloadURL(self.thumb_base % (id), thumb_path)
                thumb = dl.filepath
                dl.start()
                dls.append(dl)
                list.append((id, title, thumb, section, program, kind))
                j = j + 1
            for dl in dls:
                dl.join()
            i += 1
            self.report_hook(i, sz, self.report_udata)
        return list

    def download_link(self, id):
        """Resolve the stream URL for a channel id."""
        data = self.getData(self.stream_base % (userpwd, id))
        link = re.search("http[^ ]*.", data).group(0).replace("\"", "").strip()
        return link

    def download_list(self):
        """Return the channel list, using a pickled on-disk cache when available."""
        path = os.path.join(self.cache_path, "channels.dat")
        if os.path.isfile(path):
            fp = open(path, 'rb')
            list = pickle.load(fp)
            fp.close()
        else:
            data = self.getData(self.list_url)
            list = self.parse_list(data)
            fp = open(path, 'wb')
            pickle.dump(list, fp)
            fp.close()
        return list

    def getData(self, url):
        """GET a URL, re-authorizing once if the login form comes back instead of data."""
        req = urllib2.Request(url)
        u = urllib2.urlopen(req)
        data = u.read()
        if data.find('code_login') != -1:
            self.Authorize()
            u = urllib2.urlopen(req)
            data = u.read()
        self.cj.save(self.cfile)
        return data

    def Authorize(self):
        """POST the login form and persist the session cookie."""
        txtdata = self.post_base % (userid, userpwd)
        req = urllib2.Request(self.base_url, txtdata)
        u = urllib2.urlopen(req)
        data = u.read()
        self.cj.save(self.cfile)
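For anyone wiring this into their own addon, here is a minimal usage sketch of the class above. The data directory path and the progress callback are placeholders I made up for illustration; they are not part of the original plugin.

# Minimal usage sketch (path and callback below are assumptions, not from the original plugin)

def progress(current, total, udata):
    # called once per channel group while the XML list is parsed
    print "parsed %d of %d groups" % (current, total)

harvester = KTVHarvester("/path/to/addon/data")  # any writable directory
harvester.set_report_hook(progress)

channels = harvester.download_list()  # list of (id, title, thumb, section, program, kind)
for cid, title, thumb, section, program, kind in channels:
    print cid, title, kind

# resolve a playable stream URL for the first channel
stream_url = harvester.download_link(channels[0][0])
print stream_url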