This commit is contained in:
2026-03-29 20:52:57 -05:00
parent a97c3a5b57
commit cf155183f2
102 changed files with 55674 additions and 0 deletions

144
modules/animekai.module Normal file
View File

@@ -0,0 +1,144 @@
{
"name": "AnimeKai Streamer",
"version": "1.4.0",
"author": "Animex",
"description": "Cloud-optimized AnimeKai resolver. Handles Tunnel routing and deep decryption.",
"type": ["ANIME_STREAMER"],
"requirements": ["httpx", "beautifulsoup4"]
}
---
import re
import json
import httpx
import urllib.parse
from typing import Optional, Dict, List, Any
# Constants
BASE_URL = "https://animekai.to"  # AnimeKai site root; ajax endpoints live under it
ENC_API = "https://enc-dec.app/api"  # external encrypt/decrypt helper service (enc-kai / dec-kai / dec-mega)
DB_URL = "https://enc-dec.app/db/kai/find"  # MAL-id -> AnimeKai metadata lookup
# Desktop Chrome UA; also forwarded to dec-mega as the "agent" field, so the
# fetch and the decryption must use the same string.
UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
async def get_iframe_source(mal_id: int, episode: int, dub: bool) -> Optional[str]:
    """
    Resolve a MAL id + episode to a local player URL for the frontend.

    Args:
        mal_id: MyAnimeList id of the anime.
        episode: Episode number to resolve.
        dub: Prefer the dubbed track when True (falls back to sub).

    Returns:
        A relative "/video_player.html?..." URL on success, else None.

    Note:
        'httpx' here is the HybridClient injected by app.py, not the real
        httpx package — the 'json_data=' kwarg below is assumed to be that
        client's name for a JSON POST body (real httpx uses 'json=');
        TODO confirm against app.py.
    """
    try:
        # 1. Resolve MAL ID to AnimeKai metadata
        meta_res = await httpx.get(f"{DB_URL}?mal_id={mal_id}")
        meta_res.raise_for_status()
        meta_data = meta_res.json()
        if not meta_data:
            return None
        # 2. Locate the specific episode token ("1" = sub, "2" = dub)
        lang_key = "2" if dub else "1"
        ep_entry = meta_data[0].get("episodes", {}).get(lang_key, {}).get(str(episode))
        if not ep_entry:
            # Fallback to sub if dub is missing
            ep_entry = meta_data[0].get("episodes", {}).get("1", {}).get(str(episode))
        if not ep_entry:
            return None
        token = ep_entry["token"]
        # 3. Get Server List (requires an encrypted version of the token)
        enc_token_req = await httpx.get(f"{ENC_API}/enc-kai", params={"text": token})
        enc_token = enc_token_req.json().get("result")
        servers_res = await httpx.get(f"{BASE_URL}/ajax/links/list", params={"token": token, "_": enc_token})
        servers_html = servers_res.json().get("result", "")
        # Extract server data-lids from the HTML fragment the ajax call returns
        server_ids = re.findall(r'data-lid="([^"]+)"', servers_html)
        if not server_ids:
            return None
        # 4. Iterate through servers until one yields a playable stream
        for sid in server_ids:
            try:
                # Encrypt SID for the view-link request
                enc_sid_req = await httpx.get(f"{ENC_API}/enc-kai", params={"text": sid})
                enc_sid = enc_sid_req.json().get("result")
                view_res = await httpx.get(f"{BASE_URL}/ajax/links/view", params={"id": sid, "_": enc_sid})
                # Decrypt the view response to get the embed host URL and skip times
                dec_view_req = await httpx.post(f"{ENC_API}/dec-kai", json_data={"text": view_res.json().get("result")})
                dec_view = dec_view_req.json().get("result")
                if not dec_view or "url" not in dec_view:
                    continue
                # Convert embed URL to media API URL (e.g., megaup.nl/e/... -> megaup.nl/media/...)
                media_url = dec_view["url"].replace("/e/", "/media/")
                # Fetch the media page (this is where the Home Agent's User-Agent is crucial)
                media_page = await httpx.get(media_url, headers={"User-Agent": UA, "Referer": BASE_URL})
                # 5. Robust extraction of the encrypted "result" blob —
                #    regex straight over the HTML avoids brittle JSON parsing.
                blob_match = re.search(r'"result"\s*:\s*"([^"]+)"', media_page.text)
                if not blob_match:
                    continue
                encrypted_blob = blob_match.group(1)
                # 6. Final decryption of the stream sources (agent must match the UA used above)
                final_dec_req = await httpx.post(f"{ENC_API}/dec-mega", json_data={"text": encrypted_blob, "agent": UA})
                final_data = final_dec_req.json().get("result", {})
                sources = final_data.get("sources", [])
                if not sources:
                    continue
                # First source is taken as-is; presumably highest/default quality — TODO confirm
                video_url = sources[0].get("file")
                # 7. Construct the final player URL for the frontend.
                # Extract the host for the referer header the player must send.
                parsed_uri = urllib.parse.urlparse(media_url)
                referer_host = f"{parsed_uri.scheme}://{parsed_uri.netloc}"
                params = {
                    "video": video_url,
                    "referer": referer_host,
                    "id": mal_id,
                    "episode": episode,
                    "stream": "true",
                    "full": "true"
                }
                # Add skip times (Intro/Outro) if available
                if dec_view.get("skip"):
                    params["skip_times"] = json.dumps(dec_view["skip"])
                # Add subtitles if available
                tracks = final_data.get("tracks", [])
                subs = [t for t in tracks if t.get("kind") == "captions"]
                if subs:
                    params["captions"] = json.dumps(subs)
                return f"/video_player.html?{urllib.parse.urlencode(params)}"
            except Exception as e:
                # Best-effort: a failing server just falls through to the next one
                print(f"[AnimeKai] Sid {sid} failed: {str(e)}")
                continue
        return None
    except Exception as e:
        # Top-level guard: the host app expects None on failure, never an exception
        print(f"[AnimeKai] Global Module Error: {str(e)}")
        return None
async def get_download_link(mal_id: int, episode: int, dub: bool, quality: str) -> Optional[str]:
    """
    Return the direct m3u8 link for an episode, or None if resolution fails.

    Delegates to get_iframe_source() and pulls the 'video' parameter back out
    of the generated player URL. The 'quality' argument is accepted for the
    module interface but not used by this resolver.
    """
    player_url = await get_iframe_source(mal_id, episode, dub)
    if not player_url:
        return None
    query = urllib.parse.urlparse(player_url).query
    videos = urllib.parse.parse_qs(query).get("video", [None])
    return videos[0]

184
modules/mangadex.module Normal file
View File

@@ -0,0 +1,184 @@
{
"name": "MangaDex Reader",
"version": "1.0.0",
"author": "Animex",
"description": "Fetches manga chapters and page images from MangaDex using their v5 API.",
"type": "MANGA_READER",
"requirements": ["httpx"]
}
---
import asyncio
import httpx
from typing import Optional, List, Dict, Any
# --- Helper Functions ---
def _uses_hybrid_client() -> bool:
    """Detect the injected HybridClient: it has no AsyncClient attribute,
    whereas the real httpx package does."""
    try:
        httpx.AsyncClient
    except AttributeError:
        return True
    return False
async def _fetch_json(url: str, params: Dict[str, Any] = None, headers: Dict[str, str] = None, timeout: int = 10) -> Dict[str, Any]:
    """
    Issue an async GET and return the parsed JSON body.

    Works with both the injected HybridClient (module-level async get) and
    the real httpx package (per-request AsyncClient). Raises on HTTP errors
    via raise_for_status().
    """
    if _uses_hybrid_client():
        # HybridClient exposes get() directly on the module object
        resp = await httpx.get(url, params=params, headers=headers, timeout=timeout)
        resp.raise_for_status()
        return resp.json()
    async with httpx.AsyncClient() as client:
        resp = await client.get(url, params=params, headers=headers, timeout=timeout)
        resp.raise_for_status()
        return resp.json()
async def get_title_from_mal(mal_id: int, client: httpx.AsyncClient) -> Optional[str]:
    """
    Look up a manga's title on Jikan (MAL API) by its MAL id.

    English title is preferred for MangaDex search accuracy, falling back to
    the default (romaji) title. Returns None on any API failure.
    """
    jikan_url = f"https://api.jikan.moe/v4/manga/{mal_id}"
    try:
        payload = await _fetch_json(jikan_url)
        entry = payload.get("data", {})
        return entry.get("title_english") or entry.get("title")
    except Exception as e:
        print(f"MangaDex-Module: Jikan API error: {e}")
        return None
async def find_mangadex_id(mal_id: int, title: str, client: httpx.AsyncClient) -> Optional[str]:
    """
    Resolve a MangaDex manga UUID for the given title.

    Searches MangaDex by title, then prefers the result whose metadata links
    back to the same MAL id. If no strict match exists, falls back to the top
    search result. Returns None when the search fails or yields nothing.
    """
    search_url = "https://api.mangadex.org/manga"
    query = {
        "title": title,
        "limit": 10,
        "order[relevance]": "desc"
    }
    try:
        payload = await _fetch_json(search_url, params=query)
        candidates = payload.get("data", [])
        wanted = str(mal_id)  # MangaDex stores the MAL link as a string
        for entry in candidates:
            entry_links = entry.get("attributes", {}).get("links", {})
            if entry_links.get("mal") == wanted:
                return entry["id"]
        # Loose fallback: trust relevance ordering when no MAL link matched
        if candidates:
            print(f"MangaDex-Module: Strict MAL ID match failed. Defaulting to top search result: {candidates[0]['attributes']['title']}")
            return candidates[0]["id"]
        return None
    except Exception as e:
        print(f"MangaDex-Module: Search failed: {e}")
        return None
# --- Main Module Functions ---
async def get_chapters(mal_id: int) -> Optional[List[Dict[str, Any]]]:
    """
    Return the English chapter list for a MAL id via the MangaDex feed API.

    Each entry has 'title', 'url' (the MangaDex chapter UUID) and
    'chapter_number'. Duplicate chapter numbers (multiple scanlations) keep
    only the first occurrence in the feed's descending order. Returns None
    when resolution or the feed request fails.
    """
    title = await get_title_from_mal(mal_id, httpx)
    if not title:
        print("MangaDex-Module: Could not retrieve title from MAL.")
        return None
    md_id = await find_mangadex_id(mal_id, title, httpx)
    if not md_id:
        print(f"MangaDex-Module: Could not find MangaDex ID for MAL ID {mal_id}")
        return None
    feed_url = f"https://api.mangadex.org/manga/{md_id}/feed"
    query = {
        "translatedLanguage[]": "en",
        "order[chapter]": "desc",
        "limit": 500,
        "includes[]": "scanlation_group"
    }
    try:
        payload = await _fetch_json(feed_url, params=query)
        chapter_list: List[Dict[str, Any]] = []
        seen = set()
        for entry in payload.get("data", []):
            attrs = entry.get("attributes", {})
            number = attrs.get("chapter")
            if number is None or number in seen:
                continue  # skip oneshots without a number and duplicates
            seen.add(number)
            chapter_list.append({
                "title": attrs.get("title") or f"Chapter {number}",
                "url": entry["id"],
                "chapter_number": str(number)
            })
        return chapter_list
    except Exception as e:
        print(f"MangaDex-Module: Error fetching chapters: {e}")
        return None
async def get_chapter_images(mal_id: int, chapter_num: str) -> Optional[List[str]]:
    """
    Return direct page-image URLs for one chapter of a manga.

    Args:
        mal_id: MyAnimeList id of the manga.
        chapter_num: Chapter number as exposed by get_chapters().

    Returns:
        List of full-quality image URLs, [] when the At-Home API response is
        incomplete, or None on lookup/request failure.
    """
    # 1. We need the Chapter UUID. Re-using get_chapters to map Num -> UUID.
    # In a production app, you might cache the chapter list to avoid this extra call.
    all_chapters = await get_chapters(mal_id)
    if not all_chapters:
        return None
    chapter_uuid = None
    for ch in all_chapters:
        if ch.get("chapter_number") == str(chapter_num):
            chapter_uuid = ch.get("url")  # 'url' holds the UUID from get_chapters
            break
    if not chapter_uuid:
        print(f"MangaDex-Module: Chapter {chapter_num} not found for MAL ID {mal_id}")
        return None
    # 2. Call MangaDex At-Home API to get image metadata.
    # Routed through _fetch_json for consistency with the rest of the module:
    # the previous direct httpx.AsyncClient() call raised AttributeError under
    # the injected HybridClient (which is exactly what _uses_hybrid_client tests).
    try:
        at_home_url = f"https://api.mangadex.org/at-home/server/{chapter_uuid}"
        data = await _fetch_json(at_home_url, timeout=10)
        base_url = data.get("baseUrl")
        chapter_hash = data.get("chapter", {}).get("hash")
        # 'data' contains full quality filenames, 'dataSaver' contains compressed
        filenames = data.get("chapter", {}).get("data", [])
        if not base_url or not chapter_hash or not filenames:
            print("MangaDex-Module: Incomplete data received from At-Home API.")
            return []
        # 3. Construct direct image URLs
        # Format: {baseUrl}/data/{hash}/{filename}
        # BUG FIX: the original f-string interpolated a literal placeholder
        # instead of each filename, so every page got the same broken URL.
        image_links = [
            f"{base_url}/data/{chapter_hash}/{filename}"
            for filename in filenames
        ]
        return image_links
    except Exception as e:
        print(f"MangaDex-Module: Error fetching images: {e}")
        return None