The previous one didn't pull in the artist name, so I fixed it. Copy-paste the code, save it as a .py file,
then go to Hitomi Downloader > Tools > Import script.
Made it with AI, referencing these two:
https://kone.gg/s/all/atD-6vimaNIvrWkpU5IPGb?p=1
[Kemono download script for Hitomi Downloader]
https://github.com/KurtBestor/Hitomi-Downloader/issues/5843
[Fetch the full post list by artist name]
#coding: utf8
#title: Kemono.Party / Su / Cr (JS Logic Merged)
#title_en: Kemono.Party / Su / Cr (JS Logic Merged)
#comment: https://kemono.cr
#author: Kurt Bestor (JS Logic Merged)
import downloader
from utils import Downloader, Session, Soup, LazyUrl, urljoin, get_ext, clean_title, get_print, print_error
import clf2
from translator import tr_
from ratelimit import limits, sleep_and_retry

class Image:
    def __init__(self, url, referer, p):
        self._url = url
        self._p = p
        self.url = LazyUrl(referer, self.get, self)

    @sleep_and_retry
    @limits(1, 5)  # at most 1 request every 5 seconds, to avoid hammering the server
    def get(self, _):
        ext = get_ext(self._url) or downloader.get_ext(self._url)
        self.filename = '{:04}{}'.format(self._p, ext)
        return self._url

@Downloader.register
class Downloader_kemonoparty(Downloader):
    type = 'kemono.party'
    URLS = ['kemono.party', 'kemono.su', 'kemono.cr']
    icon = 'base64:iVBORw0KGgoAAAANSUhEUgAAACAAAAAfCAYAAACGVs+MAAAH+0lEQVRIx72XaWxcVxXHf/ctM/Nm8YyX2mPHdrzGSYjrpNlJ2zRLgSSloi2bRCtQ+VChVCKRqEIFH6gECEpoUUJLS1JamqJAiwCBWoUKke5KuuA2zVbHbhLHdsbLjD3zPDNv3nb5YDtK1RQ7EnCk++Hp3nfO/5z7P//zHsD3gfWA4P9jmoAdwNNAEGB7Iqj2xYPqt4GyT3qrZUGMlgWxmUcBtAJhIAQ0zTEBoyqs7eqsNnK6Ku4PaVOvNN/QED31vXW1bkdl6C/AdZ/kLBzRIkJQC2jAk8CtwE3AM9PZxKcBXckqkhF9zw9urCvdu6J6ElgvBKjAhISOe5Zfs2ZbW3xh3vG39Odsx/HlKcAGaJgfJZe1kTDf9+ReYERXxPKAqqxVBY26KtpdnyPAT4GzwODlpQLmt5UH9963Nvn1WzsS2u9PZD7oHS89BORVVQgmSp7XEA/csaIuoi9NRuJN8eDmCzlncabo9oBI+W5A2I5NNK5nnZK3psLQ7osG1Pi1NcbqeEhbUhHSdNdnoxBUOL7cCxQBAqrAkyy9Lhnet2td7ZaOypAYzrscOJ5+YdzyDgKocgphRhVs3thUVv/GwCTNiaD6+QWJRXnH/1x/1rYLpdIpwC5ZnpSQWl4b/kZntdG6vrFMW1pjBDRFJFrLQ/W9mdL+RVWhQ6m8C4AnuXlTU2zfdz9du2LAdIiHVPrGS/LZk+N7DU3pdnyJCrBmXsQ6MWbVrqmPbryxMcb+7jEmbZ97ll+TSEb1zX0Tpc5sybsI1JcF1a8urQmv3LG6JrQsGaazxmB5bQQB9GQsoydT0lxfZnRF3PLFReW/3Lk62XKoL4sEtrbG+cPJzOg7Fws/cXw5AlMcYNB0sFyZrzS027e1JcJLqsP87nia1/rz3NlZqfmShUcH818A7owF1ZvuWFQRWlkbodzQUBVBWFdorwixqTlWv6AytEVXlNs3NpfdfndXVdX+7lFG8i7fWVvLpO3zm3fH3hownUcA5xKAaRt3fbn2hsZoR0siSFdNmMfeGeWF3iy3LSyn6PpGf9YOTdo+r/abdKcKRAMq9bEAAU1BAvGQSmdNWNnUHItf3xALPt49xtPH0uxYVcOKujD/uljgwPvpg9va4odOjllcDkADEhnLUxZXGdu6asJiJO/yzPEMy5Jhvrmsiq1tcaIBldNjFqbtcz5r8+KHOU6nLSoNlbqojqYoeFKiKQJdFWiK4JV+k1hAZXNznL+dmbBe6M3u7smUzvjT5JsBEAEe9iXNuqI0fKalzBgwbYZMhwfW11EV1ghqCqvrIixLRhgwbQZNG9uT9GQsXvwwx/msQzKqUx3R0YTAl5CM6kR0hdNpi4Sh8dypTP/ZCft1X7INeA+wxYwcAhsrDe2p9opg9Y821FMb1ck7Hg1lAWbQAqhCMFpweOLdMZ56L8245V7aq4nobG2LU25oDJo2qUmH4bzDYM7BR1J0/IwvmQB+DTwISAFsBb4V0kQ6oqsrv7K4fPHdS6uoLwsgAXlZ8EviIsCX8Eq/yYNvpOhOFeagwgCUgKNC8LiUVAFHVWAIcGIB9cvrm2KfWj0vSldNGFX5z9IugPbyEK3lQQ71ZbFcecVz7RUhbm4u45b2BMmIrs2LBZITlruh6Mo88E8NWAU0qULki44vm+IBEZpm9Wzm+pLOaoNlyTCHz5kf229JBHl0SyNLrjFQhODo4CS7j6QCRUe+CvwKGFKATmCppgjd8/FM259rOZFAJKByc3P8Y9MrqAq2r6jm2uowEvClRALz40FZHdE2APuAXQqwB9iZLrrHPSlFT8Zi0HQu3fWsIKTkxsYoreVBdEVcArK1LcFtCxP4UqIIMG2PkbzDqbGiO5CznwS+BOxWgSjwQ1WIzkzR7TZtv1EIdNeXBDWFiK7MWoV4SGNdQ5RNzWXc0BhjVV2Eu66tpCaiowjBkOlwcrTIb4+lnZztj4xb3mJPkgFeFUyJUCNTA2nHgsrQTrPkjW1rTzSPFVz/xxvmqWVBdVZOKEIww9uZkgsgNelw+JzJwRMZK6AK8dqFyfuB94Ek8CcV8IFxoFLCvWMFd7cnSV0wnaZ3U4WD82KBrq5kWJ0NwFTQqTXTup6E3UeGnb/3ZcWZTOnpCzmn1/PlMuAXwJuAc/kscIGXBRy2fbk5V/J6PMkDowV35aq6aEtVWJtTZ8yYKgRvDeXZ8+bIsx9krKGSJ/tcX+5i6lvhHFCAjw4jd7oSACPAy5oiUkOTTkpXxS3rGqKGMhdWMqURRU+y563hi69fmNwOvD0d8J3p8l9SLvUTfIwA2WkJ7h80ndqOytDqtorQFZXxY9krgpfO53j07dHHCo7/DHAeOH5F7swhITc16ew58H765FjBmbU1BZC1XJ47Od4zWnD3MUWPmXX1ABRFYUF5sPelc+bDfz49Yc96Xgj+cTYnD583n7hrScWZWas12wEpJWnLw5OcGc67Xctrwx01kcAV0xECRvIuPz8y3P1B2rr/2EjRnM3/XK6AtpiKqgjzxGjxZweOpUct17/ij4MAnu+dcF4fmHxkXkwfmovvWSsAkLHlDPkGBk2norU8eP3CKgMhBKoQ0yIkODth89DR4Zcu5OwHTNu3/msALr+RguP35ErehkVVRvJMxuLIYJ6etIXrw197xvN/PDW+K6Ir7zn+1ajGVVhVWMPQla81lAUKYV2RAqQikJWGJisM7VnA+N9E/qhFgef5aIvlgM9eraN/A7XCX1o55MN7AAAAAElFTkSuQmCC'
    display_name = 'Kemono.Party / Su / Cr'
    MAX_CORE = 4

    def read(self):
        self.session = Session()
        # Force the domain to kemono.cr (kemono.party / kemono.su are rewritten)
        clean_url = self.url.split('?')[0].replace('kemono.party', 'kemono.cr').replace('kemono.su', 'kemono.cr')
        info = read_info(clean_url, self.session, self.cw)
        for img in info['imgs']:
            self.urls.append(img.url)
        self.title = info['title']

def read_info(url, session, cw):
    print_ = get_print(cw)
    info = {}

    # Parse the URL
    parts = url.strip('/').split('/')

    if 'post' in parts:
        # Single post
        info['type'] = 'post'
        print_('Type: Single Post')
        res = clf2.solve(url, session=session, cw=cw)
        soup = Soup(res['html'])
        info_ = read_post_content(url, soup, cw)
        info['imgs'] = info_['imgs']
        tail = f' (kemono_post_{info_["id"]})'
        info['title'] = '{}{}'.format(clean_title(info_['title'], allow_dot=True, n=-len(tail)), tail)
    elif 'user' in parts:
        # Whole artist gallery (merged JS logic)
        info['type'] = 'artist'
        print_('Type: Artist Gallery (JS Logic Merged)')

        # 1. Initial state (JS: const galerias = [])
        post_urls = []
        artist_name = None
        current_url = url
        page_num = 1

        # 2. Walk the gallery pages (the loop inside JS: async function traeGalerias())
        # The JS 'for (let j = 0; j < 30 && url; ++j)' is implemented as a while loop.
        # The 30-page cap is dropped so the whole gallery is fetched; 1000 pages is kept as a safety limit.
        while current_url and page_num < 1000:
            print_(f'Page {page_num}: Fetching list...')
            try:
                # JS: const response = await fetch(url);
                res = clf2.solve(current_url, session=session, cw=cw)
                soup = Soup(res['html'])

                # Extract the artist name (first page only)
                if artist_name is None:
                    name_tag = soup.find('span', itemprop='name')
                    artist_name = name_tag.text.strip() if name_tag else None

                # JS: const elementos = pagina.querySelector('.card-list__items').getElementsByTagName("a");
                card_list = soup.find(class_='card-list__items')
                if card_list:
                    links = card_list.find_all('a')
                    new_links_count = 0
                    # JS: galerias.push(elementos[i].href);
                    for link in links:
                        if 'href' in link.attrs:
                            # Resolve relative paths to absolute URLs (in JS the browser does this automatically for .href)
                            full_post_url = urljoin(current_url, link['href'])
                            # Keep only post links and skip duplicates
                            if '/post/' in full_post_url and full_post_url not in post_urls:
                                post_urls.append(full_post_url)
                                new_links_count += 1
                    print_(f'  Found {new_links_count} posts (Total: {len(post_urls)})')

                # JS: url = pagina.querySelector('.next')?.href;
                next_btn = soup.select_one('.next')
                if next_btn and 'href' in next_btn.attrs:
                    current_url = urljoin(current_url, next_btn['href'])
                    page_num += 1
                else:
                    print_('No more pages found.')
                    current_url = None
            except Exception as e:
                print_error(f'Error reading page {current_url}: {e}')
                break

        # 3. Parse the collected posts (JS: mostrarGaleria -> here, the actual download logic)
        print_(f'Total unique posts found: {len(post_urls)}')
        print_('Starting content parsing...')
        all_imgs = []
        for i, post_url in enumerate(post_urls):
            try:
                # print_(f'Processing [{i+1}/{len(post_urls)}]: {post_url}')
                post_res = clf2.solve(post_url, session=session, cw=cw)
                post_soup = Soup(post_res['html'])
                post_data = read_post_content(post_url, post_soup, cw, current_count=len(all_imgs))
                all_imgs.extend(post_data['imgs'])
            except Exception as e:
                print_(f'Error reading post content {post_url}: {e}')
                continue
        info['imgs'] = all_imgs

        # Build the title
        try:
            user_idx = parts.index('user')
            user_id = parts[user_idx + 1]
            service = parts[user_idx - 1]
        except (ValueError, IndexError):
            user_id = 'unknown'
            service = 'unknown'
        tail = f' (kemono_{service}_{user_id})'
        final_title = artist_name if artist_name else user_id
        info['title'] = '{}{}'.format(clean_title(final_title, allow_dot=True, n=-len(tail)), tail)
    else:
        raise NotImplementedError('Not a post or user page')
    return info

def read_post_content(url, soup, cw, current_count=0):
    # Parse the body of a single post
    info = {}
    title_tag = soup.find('h1', class_='post__title')
    info['title'] = title_tag.text.strip() if title_tag else 'Untitled'
    meta_id = soup.find('meta', {'name': 'id'})
    info['id'] = meta_id['content'] if meta_id else 'unknown'
    imgs = []
    count = current_count

    # 1. Attachments (Downloads)
    # Not part of the JS logic, but kept so the downloader grabs everything
    for item in soup.findAll('li', class_='post__attachment'):
        a_tag = item.find('a')
        if a_tag:
            href = urljoin(url, a_tag['href'])
            imgs.append(Image(href, url, count))
            count += 1

    # 2. Files (Thumbnails / Previews)
    files = soup.find('div', class_='post__files')
    if files:
        for item in files.findChildren(recursive=False):
            a = item if 'href' in item.attrs else item.find('a')
            if a:
                href = urljoin(url, a['href'])
                if 'imgur.com/' in href:  # Imgur special case
                    try:
                        from extractor import imgur_downloader
                        for img_url in imgur_downloader.get_imgs(href):
                            imgs.append(Image(img_url, href, count))
                            count += 1
                    except Exception:
                        pass
                    continue
                imgs.append(Image(href, url, count))
                count += 1

    # 3. Images embedded in the post content
    content = soup.find('div', class_='post__content')
    if content:
        for img_tag in content.findAll('img'):
            src = urljoin(url, img_tag['src'])
            imgs.append(Image(src, url, count))
            count += 1

    info['imgs'] = imgs
    return info

messageBox(u'{}: Kemono.Party / Su / Cr (JS Logic Merged)'.format(tr_(u'사이트를 추가했습니다')))
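
For anyone who wants to sanity-check the page-walking part outside of Hitomi Downloader, here is a minimal standalone sketch of the same idea (collect the post links under .card-list__items, then follow the .next pager). This is only an illustration, not part of the script above: it uses plain requests/BeautifulSoup, which the site may block (that's why the real script goes through clf2), and the example URL at the bottom is made up.

import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

def list_posts(start_url, max_pages=1000):
    # Walk the gallery pages the same way the script does:
    # gather '/post/' links inside '.card-list__items', then follow '.next'.
    post_urls = []
    url = start_url
    for _ in range(max_pages):
        html = requests.get(url, timeout=30).text
        soup = BeautifulSoup(html, 'html.parser')
        card_list = soup.find(class_='card-list__items')
        if card_list:
            for a in card_list.find_all('a', href=True):
                full = urljoin(url, a['href'])
                if '/post/' in full and full not in post_urls:
                    post_urls.append(full)
        nxt = soup.select_one('.next')
        if not (nxt and nxt.get('href')):
            break
        url = urljoin(url, nxt['href'])
    return post_urls

# Example (hypothetical artist page URL):
# print(len(list_posts('https://kemono.cr/patreon/user/123456')))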
