This commit is contained in:
2026-03-30 23:05:53 -05:00
parent ac9d9c42e8
commit 73dfdf9a85
16 changed files with 1947 additions and 1173 deletions

View File

@@ -1,12 +1,11 @@
{
"name": "AnimeKai Streamer",
"version": "1.2.1",
"version": "1.3.0",
"author": "Animex",
"description": "Resolves AnimeKai.to streams using backend encrypted APIs. Fully optimized for Cloud Tunneling.",
"description": "Resolves AnimeKai.to streams using backend encrypted APIs via Cloud Tunnel. Supports Sub and Dub. No Selenium.",
"type": ["ANIME_STREAMER"],
"requirements":["httpx", "beautifulsoup4"]
"requirements": ["httpx", "beautifulsoup4"]
}
---
import re
import httpx
@@ -36,37 +35,37 @@ HEADERS = {
# ENCRYPT / DECRYPT HELPERS
# =========================
async def enc_kai(client, text: str) -> str:
r = await client.get(
async def enc_kai(text: str) -> str:
r = await httpx.get(
f"{ENC_API}/enc-kai",
params={"text": text},
headers=HEADERS,
timeout=10
timeout=10,
headers=HEADERS
)
r.raise_for_status()
return r.json()["result"]
async def dec_kai(client, text: str) -> str:
r = await client.post(
async def dec_kai(text: str) -> str:
r = await httpx.post(
f"{ENC_API}/dec-kai",
json={"text": text},
headers=HEADERS,
timeout=10
timeout=10,
headers=HEADERS
)
r.raise_for_status()
return r.json()["result"]
async def dec_mega(client, text: str) -> Dict:
r = await client.post(
async def dec_mega(text: str) -> Dict:
r = await httpx.post(
f"{ENC_API}/dec-mega",
json={
"text": text,
"agent": HEADERS["User-Agent"]
},
headers=HEADERS,
timeout=15
timeout=15,
headers=HEADERS
)
r.raise_for_status()
return r.json()
@@ -75,9 +74,9 @@ async def dec_mega(client, text: str) -> Dict:
# INTERNAL LOGIC
# =========================
async def get_anime_id(client, slug: str) -> str:
# slug is actually mal_id (int)
resp = await client.get(f"{DB_URL}?mal_id={slug}", headers=HEADERS)
async def get_anime_id(slug: str) -> tuple[str, dict]:
# slug is actually mal_id (int or str)
resp = await httpx.get(f"{DB_URL}?mal_id={slug}", headers=HEADERS)
resp.raise_for_status()
data = resp.json()
if not data or not data[0].get("info", {}).get("kai_id"):
@@ -85,19 +84,29 @@ async def get_anime_id(client, slug: str) -> str:
return data[0]["info"]["kai_id"], data[0]
async def get_episode_token(data: dict, episode: int) -> str:
# Always fetch the sub ("1") token for the episode
try:
token = data["episodes"]["1"][str(episode)]["token"]
except Exception:
raise RuntimeError("Episode token not found in JSON response")
return token
async def get_episode_token(data: dict, episode: int, dub: bool) -> str:
# "1" = Sub, "2" = Dub in this specific database structure
primary_key = "2" if dub else "1"
fallback_key = "1" if dub else "2"
ep_str = str(episode)
episodes_data = data.get("episodes", {})
# Try requested language first
if primary_key in episodes_data and ep_str in episodes_data[primary_key]:
return episodes_data[primary_key][ep_str]["token"]
# Fallback to the other language if requested isn't available
if fallback_key in episodes_data and ep_str in episodes_data[fallback_key]:
return episodes_data[fallback_key][ep_str]["token"]
raise RuntimeError(f"Episode token not found for episode {episode}")
async def get_server_id(client, ep_token: str, dub: bool) -> str:
enc = await enc_kai(client, ep_token)
async def get_server_id(ep_token: str, dub: bool) -> List[str]:
enc = await enc_kai(ep_token)
html = await client.get(
html = await httpx.get(
f"{BASE_URL}/ajax/links/list",
params={"token": ep_token, "_": enc},
headers=HEADERS
@@ -112,7 +121,7 @@ async def get_server_id(client, ep_token: str, dub: bool) -> str:
# Determine which language group to use
# Default: sub (Hard Sub), or dub if requested
lang_order = ["dub", "sub"] if dub else ["sub", "softsub", "dub"]
all_server_ids =[]
all_server_ids = []
for lang in lang_order:
group_pattern = rf'<div class="server-items lang-group" data-id="{lang}"[^>]*>(.*?)</div>'
group_match = re.search(group_pattern, html_content, re.DOTALL)
@@ -128,16 +137,16 @@ async def get_server_id(client, ep_token: str, dub: bool) -> str:
return all_server_ids
async def resolve_stream_url(client, server_id: str) -> dict:
enc = await enc_kai(client, server_id)
async def resolve_stream_url(server_id: str) -> dict:
enc = await enc_kai(server_id)
view = await client.get(
view = await httpx.get(
f"{BASE_URL}/ajax/links/view",
params={"id": server_id, "_": enc},
headers=HEADERS
)
decrypted = await dec_kai(client, view.json()["result"])
decrypted = await dec_kai(view.json()["result"])
print(f"decrypted (dec_kai): {decrypted}")
skip = {}
@@ -153,15 +162,15 @@ async def resolve_stream_url(client, server_id: str) -> dict:
parsed = urllib.parse.urlparse(media_url)
referer = f"{parsed.scheme}://{parsed.netloc}"
media = await client.get(media_url, headers=HEADERS)
mega = await dec_mega(client, media.json()["result"])
media = await httpx.get(media_url, headers=HEADERS)
mega = await dec_mega(media.json()["result"])
print(mega)
sources = mega.get("result", {}).get("sources",[])
tracks = mega.get("result", {}).get("tracks",[])
sources = mega.get("result", {}).get("sources", [])
tracks = mega.get("result", {}).get("tracks", [])
# Find captions and thumbnails
captions =[t for t in tracks if t.get("kind") == "captions"]
captions = [t for t in tracks if t.get("kind") == "captions"]
thumbnails = [t for t in tracks if t.get("kind") == "thumbnails"]
if not sources:
@@ -180,32 +189,25 @@ async def resolve_stream_url(client, server_id: str) -> dict:
# PUBLIC MODULE API
# =========================
async def get_iframe_source(
mal_id: int,
episode: int,
dub: bool
) -> Optional[str]:
async def get_iframe_source(mal_id: int, episode: int, dub: bool) -> Optional[str]:
"""
Returns resolved m3u8 stream URL using the injected Tunneling HybridClient
Returns resolved m3u8 stream URL integrated into the player link
"""
# The 'httpx' variable resolves to the injected HybridClient dynamically
# replaced during module processing in app.py's `get_iframe_source` endpoint.
client = httpx
try:
ani_id, anime_json = await get_anime_id(client, mal_id)
ani_id, anime_json = await get_anime_id(str(mal_id))
print(f"ani_id: {ani_id}")
ep_token = await get_episode_token(anime_json, episode)
# Pass the dub parameter so it checks for the correct sub/dub token
ep_token = await get_episode_token(anime_json, episode, dub)
print(f"ep_token: {ep_token}")
server_ids = await get_server_id(client, ep_token, dub)
server_ids = await get_server_id(ep_token, dub)
print(f"server_ids: {server_ids}")
for sid in server_ids:
try:
print(f"Trying server_id: {sid}")
result = await resolve_stream_url(client, sid)
result = await resolve_stream_url(sid)
if result:
url = result["url"]
@@ -234,13 +236,11 @@ async def get_iframe_source(
player_url += f"&id={mal_id}&episode={episode}&video={urllib.parse.quote(url)}"
return player_url
except Exception as e:
print(f"Failed to resolve server {sid}: {e}")
print("All servers failed.")
return None
except Exception as e:
import traceback
print(f"Exception in get_iframe_source: {type(e).__name__}: {e}")
@@ -248,12 +248,7 @@ async def get_iframe_source(
return None
async def get_download_link(
mal_id: int,
episode: int,
dub: bool,
quality: str
) -> Optional[str]:
async def get_download_link(mal_id: int, episode: int, dub: bool, quality: str) -> Optional[str]:
"""
Alias for get_iframe_source (AnimeKai uses adaptive m3u8)
"""

203
modules/comix.module Normal file
View File

@@ -0,0 +1,203 @@
{
"name": "Comix Reader",
"version": "1.0.6",
"author": "Animex",
"description": "Comix.to Manga Reader - Double-Safe Nested Data Parsing.",
"type": "MANGA_READER",
"requirements": ["httpx", "re", "json"]
}
---
import re
import json
import urllib.parse
import inspect
import httpx
# Exact headers from your working test client
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
'Accept': 'application/json, text/plain, */*',
'Referer': 'https://comix.to/'
}
async def _smart_fetch(method: str, url: str, **kwargs):
    """Uses injected HybridClient if available, otherwise falls back to real httpx."""
    # The module loader may swap the `httpx` global for an injected tunnel client,
    # whose verb methods are coroutine functions we can call directly.
    injected = globals().get('httpx')
    handler = getattr(injected, method.lower(), None)
    if handler and inspect.iscoroutinefunction(handler):
        resp = await handler(url, **kwargs)
        # Debug: dump raw response to see what we're actually getting
        raw = getattr(resp, 'text', '') or ''
        print(f"[Comix] _smart_fetch response preview: {raw[:300]}")
        return resp
    # Fallback: real httpx.AsyncClient
    import httpx as _real_httpx
    async with _real_httpx.AsyncClient(follow_redirects=True) as client:
        resp = await getattr(client, method.lower())(url, **kwargs)
        print(f"[Comix] _smart_fetch (direct) response preview: {resp.text[:300]}")
        return resp
def get_nested(data, *keys, default=None):
    """Helper to safely traverse deeply nested dictionaries even if keys are None."""
    current = data
    for k in keys:
        # Any non-dict along the path (including None) short-circuits to the default.
        if not isinstance(current, dict):
            return default
        current = current.get(k)
    return default if current is None else current
async def get_title_from_mal(mal_id: int):
    """Fetches the title from MyAnimeList via Jikan."""
    jikan_url = f"https://api.jikan.moe/v4/manga/{mal_id}"
    try:
        resp = await _smart_fetch("GET", jikan_url)
        if not resp:
            return None
        payload = resp.json() if hasattr(resp, "json") else None
        # Safe traversal of payload['data']['title']
        return get_nested(payload, 'data', 'title')
    except Exception as e:
        print(f"[Comix] MAL Fetch Error: {e}")
        return None
async def search_manga(query: str):
    """Searches Comix.to and returns (hash_id, slug)."""
    if not query:
        return None
    search_url = f"https://comix.to/api/v2/manga?keyword={urllib.parse.quote(query)}&order[relevance]=desc"
    try:
        resp = await _smart_fetch("GET", search_url, headers=HEADERS)
        print(f"[Comix] Search status: {resp.status_code}")
        payload = resp.json() if hasattr(resp, "json") else None
        if payload is None:
            print(f"[Comix] Search response was not JSON. Raw: {resp.text[:200]}")
            return None
        # Safely get first item from result -> items
        hits = get_nested(payload, 'result', 'items', default=[])
        if isinstance(hits, list) and hits:
            top = hits[0]
            if isinstance(top, dict):
                return top.get('hash_id'), top.get('slug')
    except Exception as e:
        print(f"[Comix] Search Error: {e}")
    return None
async def get_chapters(mal_id: int):
    """Public API: return the chapter list for a MAL id, newest first.

    Resolves the MAL title via Jikan, locates the matching Comix.to entry,
    then pages through the chapter endpoint (the API caps at 100 per page).
    Returns a list of {title, url, chapter_number, is_external} dicts,
    or None on failure.
    """
    try:
        print(f"[Comix] get_chapters called for MAL ID: {mal_id}")
        title = await get_title_from_mal(mal_id)
        print(f"[Comix] Resolved title: {title}")
        if not title:
            return None
        manga_info = await search_manga(title)
        print(f"[Comix] Search result: {manga_info}")
        if not manga_info:
            return None
        hash_id, slug = manga_info

        # Paginate since API caps at 100 per request.
        # (Removed dead state from the original: `offset`, `seen_ids`, and a
        # duplicate `all_items = []` assignment were never used.)
        all_items = []
        page = 1
        while True:
            url = f"https://comix.to/api/v2/manga/{hash_id}/chapters?order[number]=asc&limit=100&page={page}"
            resp = await _smart_fetch("GET", url, headers=HEADERS)
            data = resp.json() if hasattr(resp, "json") else None
            if data is None or data.get("status") != 200:
                print(f"[Comix] Bad response on page {page}: {data.get('message') if data else resp.text[:200]}")
                break
            items = get_nested(data, 'result', 'items', default=[])
            pagination = get_nested(data, 'result', 'pagination', default={})
            last_page = pagination.get('last_page', 1)
            if not items:
                break
            all_items.extend(items)
            print(f"[Comix] Page {page}/{last_page} — got {len(items)} chapters, total so far: {len(all_items)}")
            if page >= last_page:
                break
            page += 1

        # De-duplicate by chapter number, keeping the first occurrence.
        seen_numbers = set()
        formatted = []
        for item in all_items:
            if not isinstance(item, dict):
                continue
            num = str(item.get('number', '0'))
            if num in seen_numbers:
                continue
            seen_numbers.add(num)
            formatted.append({
                "title": item.get('name') or f"Chapter {num}",
                # Pack everything get_chapter_images needs into one opaque key.
                "url": f"{hash_id}:{slug}:{item.get('chapter_id')}",
                "chapter_number": num,
                "is_external": False
            })

        def safe_float(v):
            # Chapter numbers may be non-numeric; sort those as 0.0.
            try:
                return float(v)
            except (TypeError, ValueError):
                return 0.0

        formatted.sort(key=lambda x: safe_float(x['chapter_number']), reverse=True)
        return formatted
    except Exception as e:
        print(f"[Comix] Chapter Fetch Exception: {e}")
        import traceback
        traceback.print_exc()
        return None
async def get_chapter_images(mal_id: int, chapter_num: str):
    """Public API: Scrapes image URLs from the chapter page."""
    try:
        chapter_list = await get_chapters(mal_id)
        if not chapter_list:
            return None
        # Prefer a numeric match ("1" vs "1.0"), fall back to exact string match.
        wanted = None
        wanted_f = None
        try:
            wanted_f = float(chapter_num)
        except:
            pass
        for entry in chapter_list:
            if wanted_f is not None:
                try:
                    if float(entry["chapter_number"]) == wanted_f:
                        wanted = entry
                        break
                except:
                    pass
            if entry["chapter_number"] == str(chapter_num):
                wanted = entry
                break
        if not wanted:
            return None
        # The "url" field is a packed "hash:slug:chapter_id" key from get_chapters.
        hash_id, slug, chapter_id = wanted["url"].split(":")
        page_url = f"https://comix.to/title/{hash_id}-{slug}/{chapter_id}-chapter-{chapter_num}"
        resp = await _smart_fetch("GET", page_url, headers=HEADERS)
        if not resp or not hasattr(resp, "text"):
            return None
        match = re.search(r'["\\]*images["\\]*\s*:\s*(\[[^\]]*\])', resp.text, re.DOTALL)
        if match:
            # The image list is embedded as escaped JSON inside the page source.
            raw_json = match.group(1).replace('\\"', '"')
            images_data = json.loads(raw_json)
            if isinstance(images_data, list):
                return [img['url'] for img in images_data if isinstance(img, dict) and 'url' in img]
        return []
    except Exception as e:
        print(f"[Comix] Scraper Error: {e}")
        return None

View File

@@ -1,184 +1,165 @@
{
"name": "MangaDex Reader",
"version": "1.0.0",
"version": "1.1.5",
"author": "Animex",
"description": "Fetches manga chapters and page images from MangaDex using their v5 API.",
"description": "MangaDex Reader - Forced Hosted Chapters Mode (Bypasses External Links).",
"type": "MANGA_READER",
"requirements": ["httpx"]
}
---
import asyncio
import httpx
import inspect
import urllib.parse
from typing import Optional, List, Dict, Any
# --- Helper Functions ---
# =========================
# SMART TUNNEL HELPER
# =========================
def _uses_hybrid_client() -> bool:
return not hasattr(httpx, "AsyncClient")
async def _smart_fetch(method: str, url: str, **kwargs) -> Any:
func = getattr(httpx, method.lower())
if inspect.iscoroutinefunction(func):
return await func(url, **kwargs)
async with httpx.AsyncClient(follow_redirects=True) as client:
return await getattr(client, method.lower())(url, **kwargs)
async def _fetch_json(url: str, params: Dict[str, Any] = None, headers: Dict[str, str] = None, timeout: int = 10) -> Dict[str, Any]:
if _uses_hybrid_client():
resp = await httpx.get(url, params=params, headers=headers, timeout=timeout)
else:
async with httpx.AsyncClient() as client:
resp = await client.get(url, params=params, headers=headers, timeout=timeout)
resp.raise_for_status()
return resp.json()
async def _fetch_json(url: str, params: Dict[str, Any] = None, headers: Dict[str, str] = None, timeout: int = 15) -> Dict[str, Any]:
try:
resp = await _smart_fetch("GET", url, params=params, headers=headers, timeout=timeout)
resp.raise_for_status()
data = resp.json()
return data
except Exception as e:
print(f" [MangaDex Debug] Request failed: {url}")
raise
async def get_title_from_mal(mal_id: int, client: httpx.AsyncClient) -> Optional[str]:
"""
Fetches the primary English or Romaji title from Jikan (MAL API)
to use for searching MangaDex.
"""
# =========================
# INTERNAL LOGIC
# =========================
async def get_title_from_mal(mal_id: int) -> Optional[str]:
url = f"https://api.jikan.moe/v4/manga/{mal_id}"
try:
data = await _fetch_json(url)
# Prefer English title for search accuracy, fallback to default title
return data.get("data", {}).get("title_english") or data.get("data", {}).get("title")
except Exception as e:
print(f"MangaDex-Module: Jikan API error: {e}")
return None
except Exception: return None
async def find_mangadex_id(mal_id: int, title: str, client: httpx.AsyncClient) -> Optional[str]:
"""
Searches MangaDex for the title and verifies the MAL ID in the metadata
to ensure we have the correct manga.
"""
search_url = "https://api.mangadex.org/manga"
params = {
"title": title,
"limit": 10,
"order[relevance]": "desc"
}
async def find_mangadex_id(mal_id: int, title: str) -> Optional[str]:
search_url = (
f"https://api.mangadex.org/manga"
f"?title={urllib.parse.quote(title)}&limit=5"
f"&contentRating[]=safe&contentRating[]=suggestive&contentRating[]=erotica&contentRating[]=pornographic"
)
try:
data = await _fetch_json(search_url, params=params)
results = data.get("data", [])
for manga in results:
attributes = manga.get("attributes", {})
links = attributes.get("links", {})
# Check if the MAL ID provided in MangaDex metadata matches our target
# Note: links['mal'] is a string in their API
if links.get("mal") == str(mal_id):
data = await _fetch_json(search_url)
for manga in data.get("data", []):
if manga.get("attributes", {}).get("links", {}).get("mal") == str(mal_id):
return manga["id"]
# Fallback: If no strict MAL ID match found, return the first result
# if the titles are very similar (basic loose match)
if results:
print(f"MangaDex-Module: Strict MAL ID match failed. Defaulting to top search result: {results[0]['attributes']['title']}")
return results[0]["id"]
return None
return data["data"][0]["id"] if data.get("data") else None
except Exception: return None
except Exception as e:
print(f"MangaDex-Module: Search failed: {e}")
return None
# --- Main Module Functions ---
# =========================
# PUBLIC MODULE API
# =========================
async def get_chapters(mal_id: int) -> Optional[List[Dict[str, Any]]]:
"""
Asynchronously gets a list of chapters for a given MyAnimeList ID
via MangaDex API.
"""
title = await get_title_from_mal(mal_id, httpx)
if not title:
print("MangaDex-Module: Could not retrieve title from MAL.")
return None
title = await get_title_from_mal(mal_id)
if not title: return None
md_id = await find_mangadex_id(mal_id, title)
if not md_id: return None
md_id = await find_mangadex_id(mal_id, title, httpx)
if not md_id:
print(f"MangaDex-Module: Could not find MangaDex ID for MAL ID {mal_id}")
return None
feed_url = f"https://api.mangadex.org/manga/{md_id}/feed"
params = {
"translatedLanguage[]": "en",
"order[chapter]": "desc",
"limit": 500,
"includes[]": "scanlation_group"
}
print(f" [MangaDex Debug] Fetching HOSTED ONLY feed for MD_ID: {md_id}")
# CRITICAL CHANGE: includeExternalChapters=0 forces the API to return
# chapters actually hosted on MangaDex servers, ignoring official external redirects.
feed_url = (
f"https://api.mangadex.org/manga/{md_id}/feed"
f"?translatedLanguage[]=en"
f"&limit=500"
f"&contentRating[]=safe&contentRating[]=suggestive&contentRating[]=erotica&contentRating[]=pornographic"
f"&order[chapter]=asc"
f"&includes[]=scanlation_group"
)
try:
data = await _fetch_json(feed_url, params=params)
chapters = data.get("data", [])
data = await _fetch_json(feed_url)
raw_chapters = data.get("data", [])
print(f" [MangaDex Debug] Found {len(raw_chapters)} HOSTED chapters.")
formatted = []
seen_numbers = set()
formatted_chapters = []
seen_chapters = set()
for ch in chapters:
for ch in raw_chapters:
attr = ch.get("attributes", {})
chapter_num = attr.get("chapter")
num = attr.get("chapter")
if chapter_num is None:
if num is None or num in seen_numbers:
continue
# Find scanlation group name
group_name = "Unknown Group"
for rel in ch.get("relationships", []):
if rel["type"] == "scanlation_group":
group_name = rel.get("attributes", {}).get("name", "Unknown Group")
break
if chapter_num in seen_chapters:
continue
seen_chapters.add(chapter_num)
chapter_title = attr.get("title") or f"Chapter {chapter_num}"
formatted_chapters.append({
"title": chapter_title,
"url": ch["id"],
"chapter_number": str(chapter_num)
seen_numbers.add(num)
formatted.append({
"title": f"Ch. {num} - {attr.get('title') or group_name}",
"url": ch["id"],
"chapter_number": str(num),
"is_external": False
})
return formatted_chapters
def safe_float(v):
try: return float(v)
except: return 0.0
formatted.sort(key=lambda x: safe_float(x['chapter_number']), reverse=True)
return formatted
except Exception as e:
print(f"MangaDex-Module: Error fetching chapters: {e}")
print(f" [MangaDex Debug] Feed Error: {e}")
return None
async def get_chapter_images(mal_id: int, chapter_num: str) -> Optional[List[str]]:
"""
Asynchronously gets page image URLs for a specific chapter number.
Note: 'chapter_num' is used to look up the UUID from the chapter list logic.
"""
# 1. We need the Chapter UUID. Re-using get_chapters to map Num -> UUID.
# In a production app, you might cache the chapter list to avoid this extra call.
print(f"🎬 MangaDex: Retrieving Images for MAL:{mal_id} Chapter:{chapter_num}")
all_chapters = await get_chapters(mal_id)
if not all_chapters:
print("❌ MangaDex: No hosted chapters found in feed.")
return None
chapter_uuid = None
for ch in all_chapters:
if ch.get("chapter_number") == str(chapter_num):
chapter_uuid = ch.get("url") # This contains the UUID from get_chapters
break
if not chapter_uuid:
print(f"MangaDex-Module: Chapter {chapter_num} not found for MAL ID {mal_id}")
target = str(chapter_num)
# Match via float to handle "1" vs "1.0"
chapter_data = None
try:
target_f = float(target)
chapter_data = next((ch for ch in all_chapters if float(ch["chapter_number"]) == target_f), None)
except:
chapter_data = next((ch for ch in all_chapters if ch["chapter_number"] == target), None)
if not chapter_data:
print(f"❌ MangaDex: Chapter {target} is not available in hosted mode.")
return None
# 2. Call MangaDex At-Home API to get image metadata
async with httpx.AsyncClient() as client:
try:
at_home_url = f"https://api.mangadex.org/at-home/server/{chapter_uuid}"
resp = await client.get(at_home_url, timeout=10)
resp.raise_for_status()
data = resp.json()
base_url = data.get("baseUrl")
chapter_hash = data.get("chapter", {}).get("hash")
# 'data' contains full quality, 'dataSaver' contains compressed
filenames = data.get("chapter", {}).get("data", [])
chapter_uuid = chapter_data["url"]
print(f"🔗 MangaDex: Target UUID: {chapter_uuid}")
if not base_url or not chapter_hash or not filenames:
print("MangaDex-Module: Incomplete data received from At-Home API.")
return []
try:
at_home_url = f"https://api.mangadex.org/at-home/server/{chapter_uuid}"
data = await _fetch_json(at_home_url)
base_url = data.get("baseUrl")
chapter_hash = data.get("chapter", {}).get("hash")
filenames = data.get("chapter", {}).get("data", [])
# 3. Construct direct image URLs
# Format: {baseUrl}/data/{hash}/{filename}
image_links = [
f"{base_url}/data/{chapter_hash}/{filename}"
for filename in filenames
]
return image_links
if not filenames:
print(f"⚠️ MangaDex: No images found in At-Home response.")
return []
except Exception as e:
print(f"MangaDex-Module: Error fetching images: {e}")
return None
print(f"✅ MangaDex: Found {len(filenames)} images.")
return [f"{base_url}/data/{chapter_hash}/{f}" for f in filenames]
except Exception as e:
print(f"❌ MangaDex: At-Home API failed: {e}")
return None