얼마전 케모노 주소가 변경되면서 기존 히토미 다운로더 개발자의 케모노용 스크립트가 동작하지 않길래 간단하게 수정해봄
원 개발자가 바쁜지 히토미 다운로더 업데이트가 좀 늦는 감이 있는데 어쩔 수 있나, 목마른 놈이 우물 파야지
aHR0cHM6Ly9raW8uYWMvYy9ib2JNTmdFTExETnZiWVlSY19JUEdi
키오에 파일 두개 업로드 했는데, 두개는 같은거고 브라우저나 안티바이러스가 의심파일이라며 다운로드 거절하는 경우가 있어서 두개 올림
변경점은 hds용 시그니처 삭제, 케모노 cr 도메인 추가. 끝
귀찮으면 아래 코드 복사해서 플러그인 추가 해도 됨
#coding: utf8
#title: Kemono.Party 추가
#title_en: Add Kemono.Party
#comment: https://kemono.party
#author: Kurt Bestor
import downloader
from utils import Downloader, Session, Soup, LazyUrl, urljoin, get_ext, clean_title, get_print, print_error
import clf2
from translator import tr_
from ratelimit import limits, sleep_and_retry
class Image:
    """One downloadable file of a post.

    Wraps the direct file URL in a ``LazyUrl`` so the filename/extension
    is only resolved when the downloader actually fetches the file.
    """

    def __init__(self, url, referer, p):
        self._url = url          # direct file url
        self._p = p              # zero-based position within the post
        self.url = LazyUrl(referer, self.get, self)

    @sleep_and_retry
    @limits(1, 5)  # rate limit: 1 resolution per 5 seconds (shared by all instances)
    def get(self, _):
        # Prefer the extension parsed from the url string itself; fall back
        # to downloader's detection when the url has no usable extension.
        ext = get_ext(self._url)
        if not ext:
            ext = downloader.get_ext(self._url)
        self.filename = '%04d%s' % (self._p, ext)
        return self._url
@Downloader.register
class Downloader_kemonoparty(Downloader):
    """Hitomi-downloader plugin class for kemono.party single posts.

    Matches the original .party domain plus the later .su and .cr
    mirror domains (the .cr entry is the fix this plugin exists for).
    """
    type = 'kemono.party'
    # Domains this downloader claims; kemono has migrated between TLDs.
    URLS = ['kemono.party', 'kemono.su', 'kemono.cr']
    icon = 'base64:iVBORw0KGgoAAAANSUhEUgAAACAAAAAfCAYAAACGVs+MAAAH+0lEQVRIx72XaWxcVxXHf/ctM/Nm8YyX2mPHdrzGSYjrpNlJ2zRLgSSloi2bRCtQ+VChVCKRqEIFH6gECEpoUUJLS1JamqJAiwCBWoUKke5KuuA2zVbHbhLHdsbLjD3zPDNv3nb5YDtK1RQ7EnCk++Hp3nfO/5z7P//zHsD3gfWA4P9jmoAdwNNAEGB7Iqj2xYPqt4GyT3qrZUGMlgWxmUcBtAJhIAQ0zTEBoyqs7eqsNnK6Ku4PaVOvNN/QED31vXW1bkdl6C/AdZ/kLBzRIkJQC2jAk8CtwE3AM9PZxKcBXckqkhF9zw9urCvdu6J6ElgvBKjAhISOe5Zfs2ZbW3xh3vG39Odsx/HlKcAGaJgfJZe1kTDf9+ReYERXxPKAqqxVBY26KtpdnyPAT4GzwODlpQLmt5UH9963Nvn1WzsS2u9PZD7oHS89BORVVQgmSp7XEA/csaIuoi9NRuJN8eDmCzlncabo9oBI+W5A2I5NNK5nnZK3psLQ7osG1Pi1NcbqeEhbUhHSdNdnoxBUOL7cCxQBAqrAkyy9Lhnet2td7ZaOypAYzrscOJ5+YdzyDgKocgphRhVs3thUVv/GwCTNiaD6+QWJRXnH/1x/1rYLpdIpwC5ZnpSQWl4b/kZntdG6vrFMW1pjBDRFJFrLQ/W9mdL+RVWhQ6m8C4AnuXlTU2zfdz9du2LAdIiHVPrGS/LZk+N7DU3pdnyJCrBmXsQ6MWbVrqmPbryxMcb+7jEmbZ97ll+TSEb1zX0Tpc5sybsI1JcF1a8urQmv3LG6JrQsGaazxmB5bQQB9GQsoydT0lxfZnRF3PLFReW/3Lk62XKoL4sEtrbG+cPJzOg7Fws/cXw5AlMcYNB0sFyZrzS027e1JcJLqsP87nia1/rz3NlZqfmShUcH818A7owF1ZvuWFQRWlkbodzQUBVBWFdorwixqTlWv6AytEVXlNs3NpfdfndXVdX+7lFG8i7fWVvLpO3zm3fH3hownUcA5xKAaRt3fbn2hsZoR0siSFdNmMfeGeWF3iy3LSyn6PpGf9YOTdo+r/abdKcKRAMq9bEAAU1BAvGQSmdNWNnUHItf3xALPt49xtPH0uxYVcOKujD/uljgwPvpg9va4odOjllcDkADEhnLUxZXGdu6asJiJO/yzPEMy5Jhvrmsiq1tcaIBldNjFqbtcz5r8+KHOU6nLSoNlbqojqYoeFKiKQJdFWiK4JV+k1hAZXNznL+dmbBe6M3u7smUzvjT5JsBEAEe9iXNuqI0fKalzBgwbYZMhwfW11EV1ghqCqvrIixLRhgwbYZMG9uT9GQsXvwwx/msQzKqUx3R0YTAl5CM6kR0hdNpi4Sh8dypTP/ZCft1X7INeA+wxYwcAhsrDe2p9opg9Y821FMb1ck7Hg1lAWbQAqhCMFpweOLdMZ56L8245V7aq4nobG2LU25oDJo2qUmH4bzDYM7BR1J0/IwvmQB+DTwISAFsBb4V0kQ6oqsrv7K4fPHdS6uoLwsgAXlZ8EviIsCX8Eq/yYNvpOhOFeagwgCUgKNC8LiUVAFHVWAIcGIB9cvrm2KfWj0vSldNGFX5z9IugPbyEK3lQQ71ZbFcecVz7RUhbm4u45b2BMmIrs2LBZITlruh6Mo88E8NWAU0qULki44vm+IBEZpm9Wzm+pLOaoNlyTCHz5kf229JBHl0SyNLrjFQhODo4CS7j6QCRUe+CvwKGFKATmCppgjd8/FM259rOZFAJKByc3P8Y9MrqAq2r6jm2uowEvClRALz40FZHdE2APuAXQqwB9iZLrrHPSlFT8Zi0HQu3fWsIKTkxsYoreVBdEVcArK1LcFtCxP4UqIIMG2PkbzDqbGiO5CznwS+BOxWgSjwQ1WIzkzR7TZtv1EIdNeXBDWFiK7MWoV4SGNdQ5RNzWXc0BhjVV2Eu66tpCaiowjBkOlwcrTIb4+lnZztj4xb3mJPkgFeFUyJUCNTA2nHgsrQTrPkjW1rTzSPFVz/xxvmqWVBdVZOKEIww9uZkgsgNelw+JzJwRMZK6AK8dqFyfuB94Ek8CcV8IFxoFLCvWMFd7cnSV0wnaZ3U4WD82KBrq5kWJ0NwFTQqTXTup6E3UeGnb/3ZcWZTOnpCzmn1/PlMuAXwJuAc/kscIGXBRy2fbk5V/J6PMkDowV35aq6aEtVWJtTZ8yYKgRvDeXZ8+bIsx9krKGSJ/tcX+5i6lvhHFCAjw4jd7oSACPAy5oiUkOTTkpXxS3rGqKGMhdWMqURRU+y563hi69fmNwOvD0d8J3p8l9SLvUTfIwA2WkJ7h80ndqOytDqtorQFZXxY9krgpfO53j07dHHCo7/DHAeOH5F7swhITc16ew58H765FjBmbU1BZC1XJ47Od4zWnD3MUWPmXX1ABRFYUF5sPelc+bDfz49Yc96Xgj+cTYnD583n7hrScWZWas12wEpJWnLw5OcGc67Xctrwx01kcAV0xECRvIuPz8y3P1B2rr/2EjRnM3/XK6AtpiKqgjzxGjxZweOpUct17/ij4MAnu+dcF4fmHxkXkwfmovvWSsAkLHlDPkGBk2norU8eP3CKgMhBKoQ0yIkODth89DR4Zcu5OwHTNu3/msALr+RguP35ErehkVVRvJMxuLIYJ6etIXrw197xvN/PDW+K6Ir7zn+1ajGVVhVWMPQla81lAUKYV2RAqQikJWGJisM7VnA+N9E/qhFgef5aIvlgM9eraN/A7XCX1o55MN7AAAAAElFTkSuQmCC'
    display_name = 'Kemono.Party'
    # NOTE(review): presumably caps concurrent download workers — confirm
    # against the Downloader base class.
    MAX_CORE = 4

    def read(self):
        # Fresh session per task; read_info passes it to clf2.solve so any
        # cookies obtained there are reused for the file downloads.
        self.session = Session()
        info = read_info(self.url, self.session, self.cw)
        type_ = info['type']
        if type_ == 'post':
            for img in info['imgs']:
                self.urls.append(img.url)
            self.title = info['title']
        else:
            # read_info currently only produces 'post'; guard against
            # future types being added without updating this method.
            raise NotImplementedError(type_)
def read_info(url, session, cw):
    """Fetch a kemono post page and return its metadata.

    Parameters:
        url: post url (must contain '/post/').
        session: requests-like session shared with the downloader.
        cw: the host app's task widget (passed through to clf2).

    Returns:
        dict with keys 'type' ('post'), 'imgs' (list of Image) and
        'title' (cleaned title with an id tail appended).

    Raises:
        NotImplementedError: if the url is not a single post.
    """
    info = {}
    if '/post/' in url:
        type_ = 'post'
    else:
        raise NotImplementedError('Not a post')
    info['type'] = type_
    # clf2.solve fetches the rendered page — presumably handling anti-bot
    # challenges — and stores cookies on `session` for later downloads.
    res = clf2.solve(url, session=session, cw=cw)
    soup = Soup(res['html'])
    info_ = read_post(url, soup, cw)
    info['imgs'] = info_['imgs']
    # Build the tail first so clean_title can reserve room for it
    # (n=-len(tail)) and the combined title stays within length limits.
    tail = ' (kemono.party_{}_{})'.format(type_, info_['id'])
    info['title'] = '{}{}'.format(clean_title(info_['title'], allow_dot=True, n=-len(tail)), tail)
    return info
def read_post(url, soup, cw):
    """Parse a post page: returns {'title', 'id', 'imgs'}.

    Files are collected in page order: attachments, then the files
    section (with imgur links expanded), then inline content images.
    """
    print_ = get_print(cw)

    info = {
        'title': soup.find('h1').text,
        'id': soup.find('meta', {'name': 'id'})['content'],
    }

    imgs = []

    # Attachment list items.
    for item in soup.findAll('li', class_='post__attachment'):
        href = urljoin(url, item.find('a')['href'])
        imgs.append(Image(href, url, len(imgs)))

    # Files section: each direct child either is the <a> or wraps one.
    files = soup.find('div', class_='post__files')
    if files:
        for child in files.findChildren(recursive=False):
            a = child if 'href' in child.attrs else child.find('a')
            href = urljoin(url, a['href'])
            if 'imgur.com/' in href:
                # Expand imgur links via the imgur extractor; on failure,
                # log and skip this link instead of aborting the post.
                print_('Imgur: {}'.format(href))
                try:
                    from extractor import imgur_downloader
                    for sub_url in imgur_downloader.get_imgs(href):
                        imgs.append(Image(sub_url, href, len(imgs)))
                except Exception as e:
                    print_(print_error(e))
                continue
            imgs.append(Image(href, url, len(imgs)))

    # Inline images embedded in the rendered post body.
    content = soup.find('div', class_='post__content')
    if content:
        for tag in content.findAll('img'):
            src = urljoin(url, tag['src'])
            imgs.append(Image(src, url, len(imgs)))

    info['imgs'] = imgs
    return info
# One-shot notice shown when the plugin script runs: confirms the site was
# added. NOTE(review): `messageBox` is not imported above — presumably
# injected by the host application's plugin environment; `tr_` translates
# the Korean string ("site has been added") to the UI language.
messageBox(u'{}: Kemono.Party'.format(tr_(u'사이트를 추가했습니다')))
