a bunch of stuff

This commit is contained in:
2026-04-05 20:22:12 -05:00
parent ef2a685561
commit c9f35ae27a
41 changed files with 1071 additions and 151 deletions

View File

@@ -870,6 +870,75 @@
loadHeroProxyCache(); loadHeroProxyCache();
// --- POSTER IMAGE BLOB CACHE (IndexedDB + in-memory) ---
const _posterBlobCache = new Map(); // url -> blobURL, lives for session
const _IMAGE_DB_NAME = 'animex-poster-cache';
const _IMAGE_DB_STORE = 'posters';
let _imageDb = null;
// Open (and lazily create) the poster object store. Failures are non-fatal:
// when _imageDb stays null the app simply falls back to network-only loads.
(function initImageDb() {
  try {
    const openReq = indexedDB.open(_IMAGE_DB_NAME, 1);
    openReq.onupgradeneeded = (ev) => {
      ev.target.result.createObjectStore(_IMAGE_DB_STORE, { keyPath: 'url' });
    };
    openReq.onsuccess = (ev) => { _imageDb = ev.target.result; };
    openReq.onerror = () => {};
  } catch (err) { /* IndexedDB unavailable (private mode, etc.) — ignore */ }
})();
/**
 * Look up a previously cached poster Blob for `url` in IndexedDB.
 * Resolves with the Blob on a hit, or null on miss / any failure.
 * Never rejects, so callers can await it unconditionally.
 */
function _getImageBlob(url) {
  return new Promise((resolve) => {
    if (!_imageDb) {
      resolve(null);
      return;
    }
    try {
      const store = _imageDb
        .transaction(_IMAGE_DB_STORE, 'readonly')
        .objectStore(_IMAGE_DB_STORE);
      const getReq = store.get(url);
      getReq.onsuccess = () => resolve(getReq.result?.blob || null);
      getReq.onerror = () => resolve(null);
    } catch (err) {
      resolve(null);
    }
  });
}
/**
 * Persist a poster Blob under `url` (fire-and-forget).
 * Errors are swallowed — caching is strictly best-effort.
 */
function _saveImageBlob(url, blob) {
  if (!_imageDb) return;
  try {
    _imageDb
      .transaction(_IMAGE_DB_STORE, 'readwrite')
      .objectStore(_IMAGE_DB_STORE)
      .put({ url, blob });
  } catch (err) { /* best-effort only */ }
}
/**
 * Load a poster image through a three-tier cache:
 *   1. in-memory Map (instant)  2. IndexedDB (fast, offline)  3. network.
 * On a network load the image is shown immediately from `proxyUrl`, then
 * swapped to a blob: URL which is cached for the session and persisted
 * to IndexedDB for the next visit.
 *
 * Fix vs. previous version: concurrent calls for the same URL used to each
 * mint their own URL.createObjectURL() — all but the last were leaked with
 * no way to revoke them. We now re-check the in-memory cache after every
 * await and reuse the winner's blob URL instead of creating a duplicate.
 *
 * @param {HTMLImageElement} imgEl    target <img> element
 * @param {string}           proxyUrl proxied poster URL (also the cache key)
 */
async function loadPosterImage(imgEl, proxyUrl) {
  if (!proxyUrl || !imgEl) return;
  // 1. In-memory hit — instant
  if (_posterBlobCache.has(proxyUrl)) {
    imgEl.src = _posterBlobCache.get(proxyUrl);
    return;
  }
  // 2. IndexedDB hit — fast, no network
  const cachedBlob = await _getImageBlob(proxyUrl);
  // Race guard: another call may have populated the cache while we awaited.
  if (_posterBlobCache.has(proxyUrl)) {
    imgEl.src = _posterBlobCache.get(proxyUrl);
    return;
  }
  if (cachedBlob) {
    const blobUrl = URL.createObjectURL(cachedBlob);
    _posterBlobCache.set(proxyUrl, blobUrl);
    imgEl.src = blobUrl;
    return;
  }
  // 3. Network fetch — display immediately, then cache the response
  imgEl.src = proxyUrl;
  try {
    const res = await fetch(proxyUrl);
    if (res.ok) {
      const blob = await res.blob();
      // Race guard again: only the first finisher creates/persists the blob
      // URL; everyone else reuses it (prevents unrevokable objectURL leaks).
      if (!_posterBlobCache.has(proxyUrl)) {
        _posterBlobCache.set(proxyUrl, URL.createObjectURL(blob));
        _saveImageBlob(proxyUrl, blob); // persist for next visit
      }
      imgEl.src = _posterBlobCache.get(proxyUrl); // swap to blob URL
    }
  } catch (e) { /* keep original src on failure */ }
}
// --- ELEMENT SELECTORS --- // --- ELEMENT SELECTORS ---
const mainContentArea = document.getElementById("main-content-area"); const mainContentArea = document.getElementById("main-content-area");
@@ -1256,16 +1325,20 @@
<div class="poster-image-wrapper"> <div class="poster-image-wrapper">
<div class="episode-indicator">EP ${lastWatchedEp}${indicatorText}</div> <div class="episode-indicator">EP ${lastWatchedEp}${indicatorText}</div>
<button class="poster-info-btn" title="Series Info"><i class="fas fa-info-circle"></i></button> <button class="poster-info-btn" title="Series Info"><i class="fas fa-info-circle"></i></button>
<img src="${getProxyImageUrl( <img alt="${title}" loading="lazy">
animeData.images?.jpg?.large_image_url ||
animeData.images?.jpg?.image_url)
}" alt="${title}" loading="lazy">
<div class="progress-bar"> <div class="progress-bar">
<div class="progress-bar-inner" style="width: ${progressPercent}%;"></div> <div class="progress-bar-inner" style="width: ${progressPercent}%;"></div>
</div> </div>
</div> </div>
<p class="poster-title" title="${title}">${title}</p> <p class="poster-title" title="${title}">${title}</p>
`; `;
loadPosterImage(
posterContainer.querySelector('img'),
getProxyImageUrl(
animeData.images?.jpg?.large_image_url ||
animeData.images?.jpg?.image_url
)
);
posterContainer.addEventListener("click", () => posterContainer.addEventListener("click", () =>
openSeriesOverlay(animeData.mal_id, episodeToOpen, true) openSeriesOverlay(animeData.mal_id, episodeToOpen, true)
); );
@@ -1310,10 +1383,11 @@
posterContainer.dataset.malId = anime.id; posterContainer.dataset.malId = anime.id;
posterContainer.innerHTML = ` posterContainer.innerHTML = `
<div class="poster-image-wrapper"> <div class="poster-image-wrapper">
<img src="${getProxyImageUrl(anime.image)}" alt="${anime.name}" loading="lazy"> <img alt="${anime.name}" loading="lazy">
</div> </div>
<p class="poster-title" title="${anime.name}">${anime.name}</p> <p class="poster-title" title="${anime.name}">${anime.name}</p>
`; `;
loadPosterImage(posterContainer.querySelector('img'), getProxyImageUrl(anime.image));
posterContainer.addEventListener("click", () => posterContainer.addEventListener("click", () =>
openSeriesOverlay(anime.id) openSeriesOverlay(anime.id)
); );

View File

@@ -390,6 +390,12 @@
border: 1px solid #00b4d8; border: 1px solid #00b4d8;
} }
/* WeebCentral source badge — green variant matching the other
   .mod-badge color schemes (tinted background + matching border/text) */
.mod-badge.weebcentral {
background: rgba(0, 216, 7, 0.2);
color: #00d819;
border: 1px solid #00d819;
}
.chapter-actions { .chapter-actions {
display: flex; display: flex;
gap: 10px; gap: 10px;

View File

@@ -1743,21 +1743,14 @@ iframe { width: 100%; height: 100%; border: none; }
async function fetchAndRenderEpisodes() { async function fetchAndRenderEpisodes() {
let episodes = []; let episodes = [];
try { try {
const mapRes = await fetch(`${serverUrl}/map/mal/${currentMalId}`); // Single request to the backend cache endpoint — returns the complete episode
if (mapRes.ok) { // list for the whole series, regardless of episode count. The server handles
const mapData = await mapRes.json(); // Kitsu pagination, smart staleness TTLs based on airing status, and incremental
if (mapData.kitsu_id) { // background refreshes so currently-airing shows always stay up to date.
// Optimized paging for kitsu (Handles 1000+ eps) const epRes = await fetch(`${serverUrl}/anime/${currentMalId}/episodes`);
let url = `/proxy?url=https://kitsu.io/api/edge/anime/${mapData.kitsu_id}/episodes?page[limit]=20`; if (epRes.ok) {
let count = 0; const epData = await epRes.json();
while (url && count < 60) { // Safety cap for non-cached sessions episodes = epData.episodes || [];
const r = await fetch(url);
const d = await r.json();
episodes = [...episodes, ...d.data];
url = d.links?.next;
count++;
}
}
} }
} catch (e) {} } catch (e) {}

View File

@@ -331,14 +331,40 @@
display: flex; display: flex;
gap: 12px; gap: 12px;
overflow-x: auto; overflow-x: auto;
padding: 10px 0 20px 0; padding: 10px 0 16px 0;
scroll-behavior: smooth; scroll-behavior: smooth;
-webkit-overflow-scrolling: touch; -webkit-overflow-scrolling: touch;
scrollbar-width: none; /* Firefox */ /* Touch devices: hide scrollbar (they use native swipe) */
scrollbar-width: none;
cursor: grab;
user-select: none;
}
.episode-scroller::-webkit-scrollbar { display: none; }
.episode-scroller.is-dragging { cursor: grabbing; scroll-behavior: auto; }
/* Desktop (pointer device): show a thin styled scrollbar */
@media (hover: hover) and (pointer: fine) {
.episode-scroller {
scrollbar-width: thin;
scrollbar-color: rgba(255,149,0,0.5) rgba(255,255,255,0.05);
padding-bottom: 20px;
} }
.episode-scroller::-webkit-scrollbar { .episode-scroller::-webkit-scrollbar {
display: none; display: block;
} /* Chrome/Safari */ height: 4px;
}
.episode-scroller::-webkit-scrollbar-track {
background: rgba(255,255,255,0.05);
border-radius: 2px;
}
.episode-scroller::-webkit-scrollbar-thumb {
background: rgba(255,149,0,0.5);
border-radius: 2px;
}
.episode-scroller::-webkit-scrollbar-thumb:hover {
background: var(--accent-color);
}
}
.ep-card { .ep-card {
flex: 0 0 220px; flex: 0 0 220px;
@@ -458,6 +484,42 @@
overflow: hidden; overflow: hidden;
} }
/* Locate (crosshairs) button in the episode-scroller header */
.locate-btn {
  flex-shrink: 0;
  display: flex;
  align-items: center;
  justify-content: center;
  width: 30px;
  height: 30px;
  border-radius: 8px;
  border: 1px solid var(--border-color);
  background: var(--surface-color);
  color: var(--text-secondary);
  font-size: 0.75rem;
  cursor: pointer;
  transition: all 0.2s;
}
/* Hover: lift slightly and tint toward the accent color */
.locate-btn:hover {
  border-color: var(--accent-color);
  color: var(--accent-color);
  background: rgba(255,149,0,0.08);
  transform: translateY(-1px);
}
.locate-btn:active {
  transform: scale(0.92);
}
/* Glow pulse fired briefly when the active episode card is located */
@keyframes ep-highlight-pulse {
  0%   { box-shadow: 0 0 0 0 rgba(255,149,0,0.7), 0 0 20px rgba(255,149,0,0.15); }
  40%  { box-shadow: 0 0 0 6px rgba(255,149,0,0), 0 0 30px rgba(255,149,0,0.3); }
  100% { box-shadow: 0 0 0 0 rgba(255,149,0,0), 0 0 20px rgba(255,149,0,0.15); }
}
.ep-card.active.ep-highlight {
  animation: ep-highlight-pulse 0.75s cubic-bezier(0.23, 1, 0.32, 1) forwards;
}
/* Synopsis Section */ /* Synopsis Section */
#synopsis-container { #synopsis-container {
background: var(--surface-color); background: var(--surface-color);
@@ -554,14 +616,12 @@
<div class="scroller-section"> <div class="scroller-section">
<div class="scroller-header"> <div class="scroller-header">
<h3>Episodes</h3> <h3>Episodes</h3>
<div <div style="display:flex; align-items:center; gap:10px;">
id="scroller-count" <div id="scroller-count" style="font-size:0.8rem; font-weight:700; color:var(--text-secondary);"></div>
style=" <button id="locate-ep-btn" class="locate-btn" title="Jump to current episode">
font-size: 0.8rem; <i class="fas fa-location-crosshairs"></i>
font-weight: 700; </button>
color: var(--text-secondary); </div>
"
></div>
</div> </div>
<div class="episode-scroller" id="episode-scroller"> <div class="episode-scroller" id="episode-scroller">
<!-- Episodes & Boundary cards injected here --> <!-- Episodes & Boundary cards injected here -->
@@ -577,7 +637,11 @@
<script> <script>
const params = new URLSearchParams(window.location.search); const params = new URLSearchParams(window.location.search);
const jikanId = params.get("id"); const jikanId = params.get("id");
const currentEp = parseInt(params.get("ep")); let currentEp = parseInt(params.get("ep"));
// Episode list cache — populated once by loadEpisodeScroller()
let allEpsData = []; // flat array of Kitsu episode objects
const epCards = {}; // { epNumber: <DOM card element> } for O(1) active-state updates
const DOM = { const DOM = {
player: document.getElementById("player-container"), player: document.getElementById("player-container"),
@@ -601,6 +665,37 @@
document.addEventListener("DOMContentLoaded", async () => { document.addEventListener("DOMContentLoaded", async () => {
if (!jikanId || !currentEp) return; if (!jikanId || !currentEp) return;
// ── Desktop scroller: wheel → horizontal scroll, mouse drag ──────
const scroller = document.getElementById("episode-scroller");
scroller.addEventListener("wheel", (e) => {
// Only hijack pure vertical wheel events (not touchpad horizontal swipes)
if (Math.abs(e.deltaX) > Math.abs(e.deltaY)) return;
e.preventDefault();
scroller.scrollLeft += e.deltaY * 1.5;
}, { passive: false });
let drag = { active: false, startX: 0, scrollLeft: 0 };
scroller.addEventListener("mousedown", (e) => {
drag.active = true;
drag.startX = e.pageX - scroller.offsetLeft;
drag.scrollLeft = scroller.scrollLeft;
scroller.classList.add("is-dragging");
});
const endDrag = () => { drag.active = false; scroller.classList.remove("is-dragging"); };
scroller.addEventListener("mouseleave", endDrag);
scroller.addEventListener("mouseup", endDrag);
scroller.addEventListener("mousemove", (e) => {
if (!drag.active) return;
e.preventDefault();
const x = e.pageX - scroller.offsetLeft;
const walk = (x - drag.startX) * 1.2;
scroller.scrollLeft = drag.scrollLeft - walk;
});
// ── Locate button: scroll to & flash the active episode card ─────
document.getElementById("locate-ep-btn").addEventListener("click", locateCurrentEpisode);
// ─────────────────────────────────────────────────────────────────
setupUI(); setupUI();
await initialize(); await initialize();
}); });
@@ -839,7 +934,110 @@
} }
} }
/* --- PHASE 5: EPISODE SCROLLER & CROSS-SEASON --- */ /* ─── Locate current episode ─────────────────────────────────────
Scrolls the active card into view and fires a brief glow pulse
so the user's eye is drawn to it. */
/**
 * Scroll the currently-playing episode card into view and fire a brief
 * glow pulse on it so the user's eye is drawn to the right spot.
 * No-op when the card isn't in the epCards lookup (e.g. movies).
 */
function locateCurrentEpisode() {
  const activeCard = epCards[currentEp];
  if (!activeCard) return;
  activeCard.scrollIntoView({ behavior: "smooth", inline: "center", block: "nearest" });
  // Restart the CSS animation: drop the class, force a reflow, re-add it.
  activeCard.classList.remove("ep-highlight");
  void activeCard.offsetWidth; // reflow so the removal registers first
  activeCard.classList.add("ep-highlight");
  activeCard.addEventListener(
    "animationend",
    () => activeCard.classList.remove("ep-highlight"),
    { once: true }
  );
}
/* ─── In-page episode switching ─────────────────────────────────
Called from: episode card clicks, animex-next-episode messages,
and the browser back/forward buttons (popstate).
Never does a full page reload — everything is updated in place. */
/**
 * Switch to episode `num` entirely in place — no page reload.
 * Called from episode-card clicks, the player's "next episode" message,
 * and popstate (browser back/forward). Updates the URL, the scroller's
 * active card, per-episode metadata, the player iframe, and progress.
 */
async function switchEpisode(num) {
  num = parseInt(num);
  if (isNaN(num) || num === currentEp) return;
  // 0. Blank the live iframe first so audio/video stops immediately,
  //    not after the loading overlay finishes fading in.
  const activeFrame = DOM.player.querySelector("iframe");
  if (activeFrame) activeFrame.src = "about:blank";
  const previousEp = currentEp;
  currentEp = num;
  // 1. Push the new episode into the URL (no reload)
  params.set("ep", num);
  history.pushState({ ep: num }, "", "?" + params.toString());
  // 2. Meta-line episode number
  DOM.epNum.textContent = num;
  // 3. Move the active state between scroller cards (O(1) via epCards)
  const oldCard = epCards[previousEp];
  const newCard = epCards[num];
  if (oldCard) {
    oldCard.classList.remove("active");
    oldCard.querySelector(".now-playing-badge")?.remove();
  }
  if (newCard) {
    newCard.classList.add("active");
    const thumbWrap = newCard.querySelector(".ep-card-thumb");
    if (thumbWrap && !thumbWrap.querySelector(".now-playing-badge")) {
      const playingBadge = document.createElement("div");
      playingBadge.className = "now-playing-badge";
      playingBadge.textContent = "Playing";
      thumbWrap.appendChild(playingBadge);
    }
    setTimeout(
      () => newCard.scrollIntoView({ behavior: "smooth", inline: "center", block: "nearest" }),
      100
    );
  }
  // 4. Per-episode metadata (synopsis, loading backdrop)
  updateEpisodeMeta(num);
  // 5. Reload the player for the new episode
  await loadPlayer(userPref === "dub", currentModule);
  // 6. Persist watch progress
  logProgress();
}
/* Updates synopsis and loading-screen thumbnail for a given episode
using the already-fetched allEpsData array — no extra network call. */
/**
 * Refresh the synopsis and loading-screen backdrop for episode `num`
 * using the already-fetched allEpsData array — no extra network call.
 * NOTE: loose `==` on the episode number is deliberate (Kitsu sometimes
 * returns it as a string).
 */
function updateEpisodeMeta(num) {
  const match = allEpsData.find(e => e.attributes?.number == num);
  if (!match) return;
  const attrs = match.attributes || {};
  // Loading-screen backdrop: episode thumbnail, else the series poster
  const thumbUrl = attrs.thumbnail?.original;
  if (thumbUrl) {
    DOM.loading.style.backgroundImage =
      `url(/proxy-image?url=${encodeURIComponent(thumbUrl)})`;
  } else if (cachedAnimeData?.poster) {
    DOM.loading.style.backgroundImage = `url(${cachedAnimeData.poster})`;
  }
  // Episode synopsis, falling back to the series-level synopsis
  const summary = attrs.synopsis || attrs.description || cachedAnimeData?.synopsis || "";
  DOM.synopsis.textContent = summary || "No description available.";
  // Mirror the synopsis onto the loading screen
  const loadingSynopsis = document.getElementById("loading-synopsis");
  if (loadingSynopsis) loadingSynopsis.textContent = DOM.synopsis.textContent;
}
/* Browser back/forward: re-sync state to what's in the URL */
/* Browser back/forward: bring player state back in sync with the URL */
window.addEventListener("popstate", (event) => {
  const targetEp =
    event.state?.ep ??
    parseInt(new URLSearchParams(window.location.search).get("ep"));
  if (targetEp && targetEp !== currentEp) switchEpisode(targetEp);
});
async function loadEpisodeScroller() { async function loadEpisodeScroller() {
// Movies don't have an episode list // Movies don't have an episode list
if (cachedAnimeData && cachedAnimeData.type === "Movie") return; if (cachedAnimeData && cachedAnimeData.type === "Movie") return;
@@ -848,16 +1046,16 @@
'<div class="loader-spinner" style="position:relative; transform:none; left:0; top:0; margin: 20px auto;"></div>'; '<div class="loader-spinner" style="position:relative; transform:none; left:0; top:0; margin: 20px auto;"></div>';
try { try {
// 1. Fetch current season episodes (Using the new cached endpoint logic) // 1. Fetch ALL episodes from the backend cache endpoint.
const mapRes = await fetch(`/map/mal/${jikanId}`); // The server handles Kitsu pagination, smart TTLs per airing status,
const map = await mapRes.json(); // and background refreshes — so we always get the complete list instantly.
const epRes = await fetch(`/anime/${jikanId}/episodes`);
if (!epRes.ok) throw new Error("Episode cache unavailable");
const epData = await epRes.json();
const eps = epData.episodes || [];
// We fetch the full list of episodes from Kitsu (highly cached on backend) // Keep a module-level copy for updateEpisodeMeta() lookups
const kitsuEpRes = await fetch( allEpsData = eps;
`/proxy?url=https://kitsu.io/api/edge/anime/${map.kitsu_id}/episodes?page[limit]=20&sort=number`,
);
const kitsuData = await kitsuEpRes.json();
const eps = kitsuData.data;
// 2. Fetch Season Mapping for Boundaries // 2. Fetch Season Mapping for Boundaries
const seasonRes = await fetch(`/anime/${jikanId}/seasons`); const seasonRes = await fetch(`/anime/${jikanId}/seasons`);
@@ -905,11 +1103,10 @@
</div> </div>
`; `;
card.onclick = () => { card.onclick = () => switchEpisode(num);
if (num == currentEp) return;
params.set("ep", num); // Register in lookup map so switchEpisode() can find this card in O(1)
window.location.search = params.toString(); epCards[num] = card;
};
DOM.scroller.appendChild(card); DOM.scroller.appendChild(card);
@@ -948,7 +1145,10 @@
} }
document.getElementById("scroller-count").textContent = document.getElementById("scroller-count").textContent =
`${eps.length} Episodes Total`; `${eps.length} Episodes`;
// Auto-locate on initial load so the active card is visible + highlighted
setTimeout(locateCurrentEpisode, 400);
} catch (e) { } catch (e) {
DOM.scroller.innerHTML = DOM.scroller.innerHTML =
'<p style="color:var(--text-secondary); font-size:0.8rem;">Episode list unavailable.</p>'; '<p style="color:var(--text-secondary); font-size:0.8rem;">Episode list unavailable.</p>';
@@ -987,13 +1187,10 @@
} }
} }
// Listen for messages from the player iframe // Listen for messages from the player iframe (e.g. "next episode" button inside player)
window.addEventListener("message", (event) => { window.addEventListener("message", (event) => {
if (event.data && event.data.type === "animex-next-episode") { if (event.data && event.data.type === "animex-next-episode") {
const newParams = new URLSearchParams(window.location.search); switchEpisode(currentEp + 1);
const nextEp = currentEp + 1;
newParams.set("ep", nextEp);
window.location.search = newParams.toString();
} }
}); });
</script> </script>

131
app.py
View File

@@ -837,27 +837,65 @@ async def mal_to_kitsu(mal_id: int):
raise HTTPException(status_code=404, detail="Mapping not found for this MAL ID.") raise HTTPException(status_code=404, detail="Mapping not found for this MAL ID.")
async def get_jikan_anime_status(mal_id: int) -> str:
    """Return the Jikan airing-status string for a MAL id.

    Examples: 'Finished Airing', 'Currently Airing', 'Not yet aired'.
    Reads and writes the shared Jikan disk cache so repeated lookups make
    no redundant network requests; any failure degrades to 'unknown'
    rather than raising.
    """
    url = f"https://api.jikan.moe/v4/anime/{mal_id}"
    cached = load_cache(url)
    if cached:
        return cached.get("data", {}).get("status", "unknown")
    try:
        resp = await hybrid_client.get(url, timeout=12)
        if resp.status_code == 200:
            payload = resp.json()
            save_cache(url, payload)  # warm the cache for the next caller
            return payload.get("data", {}).get("status", "unknown")
    except Exception:
        pass
    return "unknown"
async def refresh_kitsu_episodes_cache(mal_id: int, kitsu_id: int): async def refresh_kitsu_episodes_cache(mal_id: int, kitsu_id: int):
""" """
Background task to crawl Kitsu and update the local JSON cache. Background task to crawl Kitsu and update the local JSON cache.
Optimized for 1000+ episode series. Optimized for 1000+ episode series (One Piece, Naruto, etc.).
Strategy:
- Finished shows → full cache is permanent, never re-fetched.
- Currently Airing → overlap the last 20 episodes so new ones are caught.
- Unknown → same overlap strategy as airing.
""" """
cache_path = get_episodes_cache_path(mal_id) cache_path = get_episodes_cache_path(mal_id)
existing_data = {"episodes": [], "last_updated": 0, "status": "unknown"} existing_data = {"episodes": [], "last_updated": 0, "airing_status": "unknown"}
if os.path.exists(cache_path): if os.path.exists(cache_path):
try: try:
with open(cache_path, 'r', encoding='utf-8') as f: with open(cache_path, 'r', encoding='utf-8') as f:
existing_data = json.load(f) existing_data = json.load(f)
except: pass except Exception:
pass
# Get the latest episode count we currently have # --- Resolve airing status (Jikan) ---
# We'll start fetching from a point that ensures we don't miss anything airing_status = await get_jikan_anime_status(mal_id)
# but don't re-download the whole show.
# Finished shows: cache is already complete — nothing to do
if airing_status in ("Finished Airing", "Finished") and existing_data.get("episodes"):
print(f"MAL:{mal_id} is finished airing — skipping Kitsu refresh (cache complete)")
# Ensure the cached file carries the resolved status
if existing_data.get("airing_status") != airing_status:
existing_data["airing_status"] = airing_status
with open(cache_path, 'w', encoding='utf-8') as f:
json.dump(existing_data, f)
return
# --- Determine fetch start offset ---
current_count = len(existing_data.get("episodes", [])) current_count = len(existing_data.get("episodes", []))
offset = max(0, current_count - 20) # Overlap by 20 to catch updates # Overlap by 20 to catch any last-minute title/thumbnail updates
offset = max(0, current_count - 20)
new_episodes = [] new_episodes: list = []
base_url = f"https://kitsu.io/api/edge/anime/{kitsu_id}/episodes" base_url = f"https://kitsu.io/api/edge/anime/{kitsu_id}/episodes"
try: try:
@@ -865,74 +903,103 @@ async def refresh_kitsu_episodes_cache(mal_id: int, kitsu_id: int):
while current_url: while current_url:
r = await hybrid_client.get(current_url, timeout=15) r = await hybrid_client.get(current_url, timeout=15)
if r.status_code != 200: break if r.status_code != 200:
break
resp_json = r.json() resp_json = r.json()
batch = resp_json.get("data", []) batch = resp_json.get("data", [])
if not batch: break if not batch:
break
new_episodes.extend(batch) new_episodes.extend(batch)
current_url = resp_json.get("links", {}).get("next") current_url = resp_json.get("links", {}).get("next")
# Safety for infinite loops # Hard safety cap — Kitsu pagination should never exceed this
if len(new_episodes) > 2000: break if len(new_episodes) > 5000:
break
# Merge Logic: Use a dict keyed by episode number to overwrite/append # --- Merge: keyed by episode number so updates overwrite stale entries ---
merged = {ep["attributes"]["number"]: ep for ep in existing_data.get("episodes", [])} merged: dict = {ep["attributes"]["number"]: ep for ep in existing_data.get("episodes", [])}
for ep in new_episodes: for ep in new_episodes:
merged[ep["attributes"]["number"]] = ep num = ep.get("attributes", {}).get("number")
if num is not None:
merged[num] = ep
# Sort and save
sorted_episodes = [merged[k] for k in sorted(merged.keys())] sorted_episodes = [merged[k] for k in sorted(merged.keys())]
updated_cache = { updated_cache = {
"episodes": sorted_episodes, "episodes": sorted_episodes,
"last_updated": time.time(), "last_updated": time.time(),
"kitsu_id": kitsu_id "kitsu_id": kitsu_id,
"airing_status": airing_status,
} }
with open(cache_path, 'w', encoding='utf-8') as f: with open(cache_path, 'w', encoding='utf-8') as f:
json.dump(updated_cache, f) json.dump(updated_cache, f)
print(f"Successfully cached {len(sorted_episodes)} episodes for MAL:{mal_id}") print(f"Cached {len(sorted_episodes)} episodes for MAL:{mal_id} (status: {airing_status})")
except Exception as e: except Exception as e:
print(f"Background refresh failed for MAL:{mal_id}: {e}") print(f"Background refresh failed for MAL:{mal_id}: {e}")
@app.get("/anime/{mal_id}/episodes") @app.get("/anime/{mal_id}/episodes")
async def get_anime_episodes_cached(mal_id: int, background_tasks: BackgroundTasks): async def get_anime_episodes_cached(mal_id: int, background_tasks: BackgroundTasks):
""" """
Main endpoint for series-info and view.html scroller. Main episode list endpoint used by both view.html and series-info.html.
Returns cached data instantly, triggers background refresh if stale.
Returns the full cached episode list instantly, then triggers a background
refresh when the data is considered stale. Staleness thresholds are driven
by the show's Jikan airing status so we never waste bandwidth on a series
that finished airing years ago:
- Finished Airing / Finished → permanent (never stale once cached)
- Currently Airing → 6-hour TTL (weekly episodes)
- Not yet aired / unknown → 12-hour TTL (default)
Response shape (same Kitsu episode objects the frontends already consume):
{
"episodes": [ <kitsu episode objects> ],
"kitsu_id": <int>,
"airing_status": "Currently Airing" | "Finished Airing" | ...,
"last_updated": <unix timestamp>
}
""" """
cache_path = get_episodes_cache_path(mal_id) cache_path = get_episodes_cache_path(mal_id)
# 1. Check mapping # 1. Resolve Kitsu mapping (fast — uses its own disk cache)
map_data = await mal_to_kitsu(mal_id) map_data = await mal_to_kitsu(mal_id)
kitsu_id = map_data["kitsu_id"] kitsu_id = map_data["kitsu_id"]
# 2. Check if cache exists # 2. Load disk cache
cache_exists = os.path.exists(cache_path) cache_exists = os.path.exists(cache_path)
cached_data = None cached_data: Optional[dict] = None
if cache_exists: if cache_exists:
try: try:
with open(cache_path, 'r', encoding='utf-8') as f: with open(cache_path, 'r', encoding='utf-8') as f:
cached_data = json.load(f) cached_data = json.load(f)
except: cache_exists = False except Exception:
cache_exists = False
# 3. Decision Logic # 3. Cold start — block until we have at least one full fetch
now = time.time() if not cache_exists or not cached_data:
# Stale if older than 12 hours
is_stale = not cached_data or (now - cached_data.get("last_updated", 0) > 43200)
if not cache_exists:
# First time loading: Must wait for at least one batch
await refresh_kitsu_episodes_cache(mal_id, kitsu_id) await refresh_kitsu_episodes_cache(mal_id, kitsu_id)
with open(cache_path, 'r', encoding='utf-8') as f: with open(cache_path, 'r', encoding='utf-8') as f:
return json.load(f) return json.load(f)
# 4. Determine staleness based on airing status
airing_status = cached_data.get("airing_status", "unknown")
now = time.time()
last_updated = cached_data.get("last_updated", 0)
if airing_status in ("Finished Airing", "Finished"):
is_stale = False # permanent — never re-fetch
elif airing_status == "Currently Airing":
is_stale = (now - last_updated) > 21600 # 6 hours
else:
is_stale = (now - last_updated) > 43200 # 12 hours (default)
# 5. Kick off a background refresh if stale, but serve cached data right away
if is_stale: if is_stale:
# Return stale data immediately, but update in background
background_tasks.add_task(refresh_kitsu_episodes_cache, mal_id, kitsu_id) background_tasks.add_task(refresh_kitsu_episodes_cache, mal_id, kitsu_id)
return cached_data return cached_data

1
data/cache/episodes/mal_21.json vendored Normal file

File diff suppressed because one or more lines are too long

1
data/cache/episodes/mal_57658.json vendored Normal file

File diff suppressed because one or more lines are too long

View File

@@ -1 +0,0 @@
{"payload": {"entries": [{"anilist_id": 11179, "mal_id": 11179, "title_romaji": "Papa no Iukoto wo Kikinasai!", "title_english": "Listen to Me, Girls. I Am Your Father!", "title_native": "\u30d1\u30d1\u306e\u3044\u3046\u3053\u3068\u3092\u805e\u304d\u306a\u3055\u3044\uff01", "format": "TV", "start_date": {"year": 2012, "month": 1, "day": 11}, "season": "WINTER", "season_year": 2012, "episodes": 12, "relation_type_from_parent": null, "inferred_part": null, "is_season": true, "is_final_from_title": false, "title_display": "Listen to Me, Girls. I Am Your Father!", "_start_tuple": [2012, 1, 11], "is_final_season": false}, {"anilist_id": 12673, "mal_id": 12673, "title_romaji": "Papa no Iukoto wo Kikinasai!: Pokkapoka", "title_english": null, "title_native": "\u30d1\u30d1\u306e\u3044\u3046\u3053\u3068\u3092\u805e\u304d\u306a\u3055\u3044\uff01\u307d\u3063\u304b\u307d\u304b", "format": "SPECIAL", "start_date": {"year": 2012, "month": 7, "day": 11}, "season": "SUMMER", "season_year": 2012, "episodes": 1, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Papa no Iukoto wo Kikinasai!: Pokkapoka", "_start_tuple": [2012, 7, 11], "is_final_season": false}, {"anilist_id": 17875, "mal_id": 17875, "title_romaji": "Papa no Iukoto wo Kikinasai! OVA", "title_english": null, "title_native": "\u30d1\u30d1\u306e\u3044\u3046\u3053\u3068\u3092\u805e\u304d\u306a\u3055\u3044! OVA", "format": "OVA", "start_date": {"year": 2013, "month": 6, "day": 25}, "season": "SUMMER", "season_year": 2013, "episodes": 2, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Papa no Iukoto wo Kikinasai! OVA", "_start_tuple": [2013, 6, 25], "is_final_season": false}], "season_groups": [{"season_index": 1, "group_label": "Season 1", "parts": [{"short_label": "S1", "title": "Listen to Me, Girls. 
I Am Your Father!", "mal_id": 11179, "anilist_id": 11179, "start_date": {"year": 2012, "month": 1, "day": 11}, "format": "TV", "is_final": false}]}], "extras": [{"title": "Papa no Iukoto wo Kikinasai!: Pokkapoka", "mal_id": 12673, "anilist_id": 12673, "format": "SPECIAL", "start_date": {"year": 2012, "month": 7, "day": 11}}, {"title": "Papa no Iukoto wo Kikinasai! OVA", "mal_id": 17875, "anilist_id": 17875, "format": "OVA", "start_date": {"year": 2013, "month": 6, "day": 25}}]}, "_timestamp": 1775005857.750725}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1 +0,0 @@
{"payload": {"entries": [{"anilist_id": 153288, "mal_id": 52588, "title_romaji": "Kaijuu 8-gou", "title_english": "Kaiju No. 8", "title_native": "\u602a\u7363\uff18\u53f7", "format": "TV", "start_date": {"year": 2024, "month": 4, "day": 13}, "season": "SPRING", "season_year": 2024, "episodes": 12, "relation_type_from_parent": null, "inferred_part": null, "is_season": true, "is_final_from_title": false, "title_display": "Kaiju No. 8", "_start_tuple": [2024, 4, 13], "is_final_season": false}, {"anilist_id": 178754, "mal_id": 59177, "title_romaji": "Kaijuu 8-gou 2nd Season", "title_english": "Kaiju No. 8 Season 2", "title_native": "\u602a\u7363\uff18\u53f7 \u7b2c\uff12\u671f", "format": "TV", "start_date": {"year": 2025, "month": 7, "day": 19}, "season": "SUMMER", "season_year": 2025, "episodes": 11, "relation_type_from_parent": null, "inferred_part": 2, "is_season": true, "is_final_from_title": false, "title_display": "Kaiju No. 8 Season 2", "_start_tuple": [2025, 7, 19], "is_final_season": false}, {"anilist_id": 204362, "mal_id": 63136, "title_romaji": "Kaijuu 8-gou: Kanketsu-hen", "title_english": null, "title_native": "\u602a\u7363\uff18\u53f7 \u5b8c\u7d50\u7de8", "format": null, "start_date": {"year": null, "month": null, "day": null}, "season": null, "season_year": null, "episodes": null, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Kaijuu 8-gou: Kanketsu-hen", "_start_tuple": [0, 0, 0], "is_final_season": false}, {"anilist_id": 179998, "mal_id": 59489, "title_romaji": "Kaijuu 8-gou Movie", "title_english": "Kaiju No. 8: Mission Recon", "title_native": "\u602a\u73638\u53f7 \u7b2c1\u671f\u7dcf\u96c6\u7de8", "format": "MOVIE", "start_date": {"year": 2025, "month": 3, "day": 28}, "season": "WINTER", "season_year": 2025, "episodes": 1, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Kaiju No. 
8: Mission Recon", "_start_tuple": [2025, 3, 28], "is_final_season": false}], "season_groups": [{"season_index": 1, "group_label": "Season 1", "parts": [{"short_label": "S1", "title": "Kaiju No. 8", "mal_id": 52588, "anilist_id": 153288, "start_date": {"year": 2024, "month": 4, "day": 13}, "format": "TV", "is_final": false}]}, {"season_index": 2, "group_label": "Season 2", "parts": [{"short_label": "S2", "title": "Kaiju No. 8 Season 2", "mal_id": 59177, "anilist_id": 178754, "start_date": {"year": 2025, "month": 7, "day": 19}, "format": "TV", "is_final": false}]}], "extras": [{"title": "Kaijuu 8-gou: Kanketsu-hen", "mal_id": 63136, "anilist_id": 204362, "format": null, "start_date": {"year": null, "month": null, "day": null}}, {"title": "Kaiju No. 8: Mission Recon", "mal_id": 59489, "anilist_id": 179998, "format": "MOVIE", "start_date": {"year": 2025, "month": 3, "day": 28}}]}, "_timestamp": 1774929755.5327868}

View File

@@ -1 +0,0 @@
{"payload": {"entries": [{"anilist_id": 178754, "mal_id": 59177, "title_romaji": "Kaijuu 8-gou 2nd Season", "title_english": "Kaiju No. 8 Season 2", "title_native": "\u602a\u7363\uff18\u53f7 \u7b2c\uff12\u671f", "format": "TV", "start_date": {"year": 2025, "month": 7, "day": 19}, "season": "SUMMER", "season_year": 2025, "episodes": 11, "relation_type_from_parent": null, "inferred_part": 2, "is_season": true, "is_final_from_title": false, "title_display": "Kaiju No. 8 Season 2", "_start_tuple": [2025, 7, 19], "is_final_season": false}, {"anilist_id": 204362, "mal_id": 63136, "title_romaji": "Kaijuu 8-gou: Kanketsu-hen", "title_english": null, "title_native": "\u602a\u7363\uff18\u53f7 \u5b8c\u7d50\u7de8", "format": null, "start_date": {"year": null, "month": null, "day": null}, "season": null, "season_year": null, "episodes": null, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Kaijuu 8-gou: Kanketsu-hen", "_start_tuple": [0, 0, 0], "is_final_season": false}, {"anilist_id": 179998, "mal_id": 59489, "title_romaji": "Kaijuu 8-gou Movie", "title_english": "Kaiju No. 8: Mission Recon", "title_native": "\u602a\u73638\u53f7 \u7b2c1\u671f\u7dcf\u96c6\u7de8", "format": "MOVIE", "start_date": {"year": 2025, "month": 3, "day": 28}, "season": "WINTER", "season_year": 2025, "episodes": 1, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Kaiju No. 8: Mission Recon", "_start_tuple": [2025, 3, 28], "is_final_season": false}, {"anilist_id": 153288, "mal_id": 52588, "title_romaji": "Kaijuu 8-gou", "title_english": "Kaiju No. 8", "title_native": "\u602a\u7363\uff18\u53f7", "format": "TV", "start_date": {"year": 2024, "month": 4, "day": 13}, "season": "SPRING", "season_year": 2024, "episodes": 12, "relation_type_from_parent": null, "inferred_part": null, "is_season": true, "is_final_from_title": false, "title_display": "Kaiju No. 
8", "_start_tuple": [2024, 4, 13], "is_final_season": false}], "season_groups": [{"season_index": 1, "group_label": "Season 1", "parts": [{"short_label": "S1", "title": "Kaiju No. 8", "mal_id": 52588, "anilist_id": 153288, "start_date": {"year": 2024, "month": 4, "day": 13}, "format": "TV", "is_final": false}]}, {"season_index": 2, "group_label": "Season 2", "parts": [{"short_label": "S2", "title": "Kaiju No. 8 Season 2", "mal_id": 59177, "anilist_id": 178754, "start_date": {"year": 2025, "month": 7, "day": 19}, "format": "TV", "is_final": false}]}], "extras": [{"title": "Kaijuu 8-gou: Kanketsu-hen", "mal_id": 63136, "anilist_id": 204362, "format": null, "start_date": {"year": null, "month": null, "day": null}}, {"title": "Kaiju No. 8: Mission Recon", "mal_id": 59489, "anilist_id": 179998, "format": "MOVIE", "start_date": {"year": 2025, "month": 3, "day": 28}}]}, "_timestamp": 1774929794.0022862}

View File

@@ -0,0 +1 @@
{"payload": {"mal_id": 656, "url": "https://myanimelist.net/manga/656/Vagabond", "images": {"jpg": {"image_url": "https://myanimelist.net/images/manga/1/259070.jpg", "small_image_url": "https://myanimelist.net/images/manga/1/259070t.jpg", "large_image_url": "https://myanimelist.net/images/manga/1/259070l.jpg"}, "webp": {"image_url": "https://myanimelist.net/images/manga/1/259070.webp", "small_image_url": "https://myanimelist.net/images/manga/1/259070t.webp", "large_image_url": "https://myanimelist.net/images/manga/1/259070l.webp"}}, "approved": true, "titles": [{"type": "Default", "title": "Vagabond"}, {"type": "Japanese", "title": "\u30d0\u30ac\u30dc\u30f3\u30c9"}, {"type": "English", "title": "Vagabond"}], "title": "Vagabond", "title_english": "Vagabond", "title_japanese": "\u30d0\u30ac\u30dc\u30f3\u30c9", "title_synonyms": [], "type": "Manga", "chapters": 327, "volumes": 37, "status": "On Hiatus", "publishing": false, "published": {"from": "1998-09-03T00:00:00+00:00", "to": "2015-05-21T00:00:00+00:00", "prop": {"from": {"day": 3, "month": 9, "year": 1998}, "to": {"day": 21, "month": 5, "year": 2015}}, "string": "Sep 3, 1998 to May 21, 2015"}, "score": 9.27, "scored": 9.27, "scored_by": 174602, "rank": 3, "popularity": 12, "members": 455240, "favorites": 48512, "synopsis": "In 16th-century Japan, Shinmen Takezou is a wild, rough young man in both his appearance and his actions. His aggressive nature has won him the collective reproach and fear of his village, leading him and his best friend, Matahachi Honiden, to run away in search of something grander than provincial life. The pair enlist in the Toyotomi army, yearning for glory\u2014but when the Toyotomi suffer a crushing defeat at the hands of the Tokugawa Clan at the Battle of Sekigahara, the friends barely make it out alive.\n\nAfter the two are separated, Shinmen returns home on a self-appointed mission to notify the Hon'iden family of Matahachi's survival. 
He instead finds himself a wanted criminal, framed for his friend's supposed murder based on his history of violence. Upon being captured, he is strung up on a tree and left to die. An itinerant monk, the distinguished Takuan Soho, takes pity on the \"devil child,\" secretly freeing Shinmen and christening him with a new name to avoid pursuit by the authorities: Musashi Miyamoto.\n\nVagabond is the fictitious retelling of the life of one of Japan's most renowned swordsmen, the \"Sword Saint\" Musashi Miyamoto\u2014his rise from a swordsman with no desire other than to become \"Invincible Under the Heavens\" to an enlightened warrior who slowly learns of the importance of close friends, self-reflection, and life itself.\n\n[Written by MAL Rewrite]", "background": "Vagabond won the Japan Media Arts Festival Manga Grand Prize and the 24th Kodansha Manga Award in the General category in 2000. The series won the Tezuka Osamu Cultural Prize in 2002 and has had over 82 million copies sold worldwide. Vagabond was published in English by VIZ Media under the VIZ Signature imprint from April 5, 2002, to April 21, 2015, and in large 3-in-1 omnibuses under the VIZBIG imprint from September 16, 2008, to April 21, 2015. The English release got Takehiko Inoue a nomination for the Eisner Award in the Best Writer/Artist category in 2003. 
It was also published in Brazilian Portuguese by Panini Comics from February 2016 to February 2019.", "authors": [{"mal_id": 1911, "type": "people", "name": "Inoue, Takehiko", "url": "https://myanimelist.net/people/1911/Takehiko_Inoue"}, {"mal_id": 5760, "type": "people", "name": "Yoshikawa, Eiji", "url": "https://myanimelist.net/people/5760/Eiji_Yoshikawa"}], "serializations": [{"mal_id": 72, "type": "manga", "name": "Morning", "url": "https://myanimelist.net/manga/magazine/72/Morning"}], "genres": [{"mal_id": 1, "type": "manga", "name": "Action", "url": "https://myanimelist.net/manga/genre/1/Action"}, {"mal_id": 2, "type": "manga", "name": "Adventure", "url": "https://myanimelist.net/manga/genre/2/Adventure"}, {"mal_id": 46, "type": "manga", "name": "Award Winning", "url": "https://myanimelist.net/manga/genre/46/Award_Winning"}], "explicit_genres": [], "themes": [{"mal_id": 13, "type": "manga", "name": "Historical", "url": "https://myanimelist.net/manga/genre/13/Historical"}, {"mal_id": 21, "type": "manga", "name": "Samurai", "url": "https://myanimelist.net/manga/genre/21/Samurai"}], "demographics": [{"mal_id": 41, "type": "manga", "name": "Seinen", "url": "https://myanimelist.net/manga/genre/41/Seinen"}], "relations": [], "external": [{"name": "Official Site", "url": "http://morningmanga.com/lineup/15"}, {"name": "Wikipedia", "url": "https://en.wikipedia.org/wiki/Vagabond_(manga)"}, {"name": "Wikipedia", "url": "https://ja.wikipedia.org/wiki/%E3%83%90%E3%82%AC%E3%83%9C%E3%83%B3%E3%83%89"}]}, "_timestamp": 1775437997.008553}

View File

@@ -1 +0,0 @@
{"payload": {"comix": [{"title": "Chapter 6", "url": "1mql9:honki-de-shitemo-ii-desu-ka:3874900", "chapter_number": "6", "is_external": false}, {"title": "Chapter 5", "url": "1mql9:honki-de-shitemo-ii-desu-ka:3874755", "chapter_number": "5", "is_external": false}, {"title": "Chapter 4", "url": "1mql9:honki-de-shitemo-ii-desu-ka:3874561", "chapter_number": "4", "is_external": false}, {"title": "Chapter 3", "url": "1mql9:honki-de-shitemo-ii-desu-ka:3874447", "chapter_number": "3", "is_external": false}, {"title": "Chapter 2", "url": "1mql9:honki-de-shitemo-ii-desu-ka:3874343", "chapter_number": "2", "is_external": false}, {"title": "Chapter 1", "url": "1mql9:honki-de-shitemo-ii-desu-ka:3874222", "chapter_number": "1", "is_external": false}]}, "_timestamp": 1775087084.836826}

View File

@@ -1 +0,0 @@
{"payload": {"entries": [], "season_groups": [], "extras": []}, "_timestamp": 1774915035.3131928}

View File

@@ -1 +0,0 @@
{"payload": {"entries": [{"anilist_id": 113417, "mal_id": 40746, "title_romaji": "Overflow", "title_english": "Overflow", "title_native": "\u304a\u30fc\u3070\u30fc\u3075\u308d\u3049", "format": "ONA", "start_date": {"year": 2020, "month": 1, "day": 6}, "season": "WINTER", "season_year": 2020, "episodes": 8, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Overflow", "_start_tuple": [2020, 1, 6], "is_final_season": false}], "season_groups": [], "extras": [{"title": "Overflow", "mal_id": 40746, "anilist_id": 113417, "format": "ONA", "start_date": {"year": 2020, "month": 1, "day": 6}}]}, "_timestamp": 1774963942.841442}

File diff suppressed because one or more lines are too long

View File

@@ -1 +0,0 @@
{"payload": {"entries": [], "season_groups": [], "extras": []}, "_timestamp": 1774915405.991542}

View File

@@ -1 +0,0 @@
{"payload": {"entries": [{"anilist_id": 151807, "mal_id": 52299, "title_romaji": "Ore dake Level Up na Ken", "title_english": "Solo Leveling", "title_native": "\u4ffa\u3060\u3051\u30ec\u30d9\u30eb\u30a2\u30c3\u30d7\u306a\u4ef6", "format": "TV", "start_date": {"year": 2024, "month": 1, "day": 7}, "season": "WINTER", "season_year": 2024, "episodes": 12, "relation_type_from_parent": null, "inferred_part": null, "is_season": true, "is_final_from_title": false, "title_display": "Solo Leveling", "_start_tuple": [2024, 1, 7], "is_final_season": false}, {"anilist_id": 176496, "mal_id": 58567, "title_romaji": "Ore dake Level Up na Ken: Season 2 - Arise from the Shadow", "title_english": "Solo Leveling Season 2 -Arise from the Shadow-", "title_native": "\u4ffa\u3060\u3051\u30ec\u30d9\u30eb\u30a2\u30c3\u30d7\u306a\u4ef6 Season 2 -Arise from the Shadow-", "format": "TV", "start_date": {"year": 2025, "month": 1, "day": 5}, "season": "WINTER", "season_year": 2025, "episodes": 13, "relation_type_from_parent": null, "inferred_part": 2, "is_season": true, "is_final_from_title": false, "title_display": "Solo Leveling Season 2 -Arise from the Shadow-", "_start_tuple": [2025, 1, 5], "is_final_season": false}, {"anilist_id": 184694, "mal_id": 59841, "title_romaji": "Ore dake Level Up na Ken: ReAwakening", "title_english": "Solo Leveling -ReAwakening-", "title_native": "\u4ffa\u3060\u3051\u30ec\u30d9\u30eb\u30a2\u30c3\u30d7\u306a\u4ef6 -ReAwakening-", "format": "MOVIE", "start_date": {"year": 2024, "month": 11, "day": 29}, "season": "FALL", "season_year": 2024, "episodes": 1, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Solo Leveling -ReAwakening-", "_start_tuple": [2024, 11, 29], "is_final_season": false}], "season_groups": [{"season_index": 1, "group_label": "Season 1", "parts": [{"short_label": "S1", "title": "Solo Leveling", "mal_id": 52299, "anilist_id": 151807, "start_date": {"year": 2024, "month": 1, 
"day": 7}, "format": "TV", "is_final": false}]}, {"season_index": 2, "group_label": "Season 2", "parts": [{"short_label": "S2", "title": "Solo Leveling Season 2 -Arise from the Shadow-", "mal_id": 58567, "anilist_id": 176496, "start_date": {"year": 2025, "month": 1, "day": 5}, "format": "TV", "is_final": false}]}], "extras": [{"title": "Solo Leveling -ReAwakening-", "mal_id": 59841, "anilist_id": 184694, "format": "MOVIE", "start_date": {"year": 2024, "month": 11, "day": 29}}]}, "_timestamp": 1775000957.808718}

File diff suppressed because one or more lines are too long

View File

@@ -1 +0,0 @@
{"payload": {"entries": [{"anilist_id": 204362, "mal_id": 63136, "title_romaji": "Kaijuu 8-gou: Kanketsu-hen", "title_english": null, "title_native": "\u602a\u7363\uff18\u53f7 \u5b8c\u7d50\u7de8", "format": null, "start_date": {"year": null, "month": null, "day": null}, "season": null, "season_year": null, "episodes": null, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Kaijuu 8-gou: Kanketsu-hen", "_start_tuple": [0, 0, 0], "is_final_season": false}, {"anilist_id": 178754, "mal_id": 59177, "title_romaji": "Kaijuu 8-gou 2nd Season", "title_english": "Kaiju No. 8 Season 2", "title_native": "\u602a\u7363\uff18\u53f7 \u7b2c\uff12\u671f", "format": "TV", "start_date": {"year": 2025, "month": 7, "day": 19}, "season": "SUMMER", "season_year": 2025, "episodes": 11, "relation_type_from_parent": null, "inferred_part": 2, "is_season": true, "is_final_from_title": false, "title_display": "Kaiju No. 8 Season 2", "_start_tuple": [2025, 7, 19], "is_final_season": false}, {"anilist_id": 179998, "mal_id": 59489, "title_romaji": "Kaijuu 8-gou Movie", "title_english": "Kaiju No. 8: Mission Recon", "title_native": "\u602a\u73638\u53f7 \u7b2c1\u671f\u7dcf\u96c6\u7de8", "format": "MOVIE", "start_date": {"year": 2025, "month": 3, "day": 28}, "season": "WINTER", "season_year": 2025, "episodes": 1, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Kaiju No. 8: Mission Recon", "_start_tuple": [2025, 3, 28], "is_final_season": false}, {"anilist_id": 153288, "mal_id": 52588, "title_romaji": "Kaijuu 8-gou", "title_english": "Kaiju No. 8", "title_native": "\u602a\u7363\uff18\u53f7", "format": "TV", "start_date": {"year": 2024, "month": 4, "day": 13}, "season": "SPRING", "season_year": 2024, "episodes": 12, "relation_type_from_parent": null, "inferred_part": null, "is_season": true, "is_final_from_title": false, "title_display": "Kaiju No. 
8", "_start_tuple": [2024, 4, 13], "is_final_season": false}], "season_groups": [{"season_index": 1, "group_label": "Season 1", "parts": [{"short_label": "S1", "title": "Kaiju No. 8", "mal_id": 52588, "anilist_id": 153288, "start_date": {"year": 2024, "month": 4, "day": 13}, "format": "TV", "is_final": false}]}, {"season_index": 2, "group_label": "Season 2", "parts": [{"short_label": "S2", "title": "Kaiju No. 8 Season 2", "mal_id": 59177, "anilist_id": 178754, "start_date": {"year": 2025, "month": 7, "day": 19}, "format": "TV", "is_final": false}]}], "extras": [{"title": "Kaijuu 8-gou: Kanketsu-hen", "mal_id": 63136, "anilist_id": 204362, "format": null, "start_date": {"year": null, "month": null, "day": null}}, {"title": "Kaiju No. 8: Mission Recon", "mal_id": 59489, "anilist_id": 179998, "format": "MOVIE", "start_date": {"year": 2025, "month": 3, "day": 28}}]}, "_timestamp": 1774929784.310032}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
{"payload": {"mangadex": [{"title": "Ch. 1 - To You, 2000 Years From Now", "url": "a2e05215-4d19-401e-8526-7659b2ddcc19", "chapter_number": "1", "is_external": false}]}, "_timestamp": 1775436651.154676}

View File

@@ -1 +0,0 @@
{"payload": {"entries": [{"anilist_id": 153288, "mal_id": 52588, "title_romaji": "Kaijuu 8-gou", "title_english": "Kaiju No. 8", "title_native": "\u602a\u7363\uff18\u53f7", "format": "TV", "start_date": {"year": 2024, "month": 4, "day": 13}, "season": "SPRING", "season_year": 2024, "episodes": 12, "relation_type_from_parent": null, "inferred_part": null, "is_season": true, "is_final_from_title": false, "title_display": "Kaiju No. 8", "_start_tuple": [2024, 4, 13], "is_final_season": false}, {"anilist_id": 178754, "mal_id": 59177, "title_romaji": "Kaijuu 8-gou 2nd Season", "title_english": "Kaiju No. 8 Season 2", "title_native": "\u602a\u7363\uff18\u53f7 \u7b2c\uff12\u671f", "format": "TV", "start_date": {"year": 2025, "month": 7, "day": 19}, "season": "SUMMER", "season_year": 2025, "episodes": 11, "relation_type_from_parent": null, "inferred_part": 2, "is_season": true, "is_final_from_title": false, "title_display": "Kaiju No. 8 Season 2", "_start_tuple": [2025, 7, 19], "is_final_season": false}, {"anilist_id": 204362, "mal_id": 63136, "title_romaji": "Kaijuu 8-gou: Kanketsu-hen", "title_english": null, "title_native": "\u602a\u7363\uff18\u53f7 \u5b8c\u7d50\u7de8", "format": null, "start_date": {"year": null, "month": null, "day": null}, "season": null, "season_year": null, "episodes": null, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Kaijuu 8-gou: Kanketsu-hen", "_start_tuple": [0, 0, 0], "is_final_season": false}, {"anilist_id": 179998, "mal_id": 59489, "title_romaji": "Kaijuu 8-gou Movie", "title_english": "Kaiju No. 8: Mission Recon", "title_native": "\u602a\u73638\u53f7 \u7b2c1\u671f\u7dcf\u96c6\u7de8", "format": "MOVIE", "start_date": {"year": 2025, "month": 3, "day": 28}, "season": "WINTER", "season_year": 2025, "episodes": 1, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Kaiju No. 
8: Mission Recon", "_start_tuple": [2025, 3, 28], "is_final_season": false}], "season_groups": [{"season_index": 1, "group_label": "Season 1", "parts": [{"short_label": "S1", "title": "Kaiju No. 8", "mal_id": 52588, "anilist_id": 153288, "start_date": {"year": 2024, "month": 4, "day": 13}, "format": "TV", "is_final": false}]}, {"season_index": 2, "group_label": "Season 2", "parts": [{"short_label": "S2", "title": "Kaiju No. 8 Season 2", "mal_id": 59177, "anilist_id": 178754, "start_date": {"year": 2025, "month": 7, "day": 19}, "format": "TV", "is_final": false}]}], "extras": [{"title": "Kaijuu 8-gou: Kanketsu-hen", "mal_id": 63136, "anilist_id": 204362, "format": null, "start_date": {"year": null, "month": null, "day": null}}, {"title": "Kaiju No. 8: Mission Recon", "mal_id": 59489, "anilist_id": 179998, "format": "MOVIE", "start_date": {"year": 2025, "month": 3, "day": 28}}]}, "_timestamp": 1775001076.475924}

View File

@@ -1 +0,0 @@
{"payload": {"entries": [{"anilist_id": 176496, "mal_id": 58567, "title_romaji": "Ore dake Level Up na Ken: Season 2 - Arise from the Shadow", "title_english": "Solo Leveling Season 2 -Arise from the Shadow-", "title_native": "\u4ffa\u3060\u3051\u30ec\u30d9\u30eb\u30a2\u30c3\u30d7\u306a\u4ef6 Season 2 -Arise from the Shadow-", "format": "TV", "start_date": {"year": 2025, "month": 1, "day": 5}, "season": "WINTER", "season_year": 2025, "episodes": 13, "relation_type_from_parent": null, "inferred_part": 2, "is_season": true, "is_final_from_title": false, "title_display": "Solo Leveling Season 2 -Arise from the Shadow-", "_start_tuple": [2025, 1, 5], "is_final_season": false}, {"anilist_id": 184694, "mal_id": 59841, "title_romaji": "Ore dake Level Up na Ken: ReAwakening", "title_english": "Solo Leveling -ReAwakening-", "title_native": "\u4ffa\u3060\u3051\u30ec\u30d9\u30eb\u30a2\u30c3\u30d7\u306a\u4ef6 -ReAwakening-", "format": "MOVIE", "start_date": {"year": 2024, "month": 11, "day": 29}, "season": "FALL", "season_year": 2024, "episodes": 1, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Solo Leveling -ReAwakening-", "_start_tuple": [2024, 11, 29], "is_final_season": false}, {"anilist_id": 151807, "mal_id": 52299, "title_romaji": "Ore dake Level Up na Ken", "title_english": "Solo Leveling", "title_native": "\u4ffa\u3060\u3051\u30ec\u30d9\u30eb\u30a2\u30c3\u30d7\u306a\u4ef6", "format": "TV", "start_date": {"year": 2024, "month": 1, "day": 7}, "season": "WINTER", "season_year": 2024, "episodes": 12, "relation_type_from_parent": null, "inferred_part": null, "is_season": true, "is_final_from_title": false, "title_display": "Solo Leveling", "_start_tuple": [2024, 1, 7], "is_final_season": false}], "season_groups": [{"season_index": 1, "group_label": "Season 1", "parts": [{"short_label": "S1", "title": "Solo Leveling", "mal_id": 52299, "anilist_id": 151807, "start_date": {"year": 2024, "month": 1, 
"day": 7}, "format": "TV", "is_final": false}]}, {"season_index": 2, "group_label": "Season 2", "parts": [{"short_label": "S2", "title": "Solo Leveling Season 2 -Arise from the Shadow-", "mal_id": 58567, "anilist_id": 176496, "start_date": {"year": 2025, "month": 1, "day": 5}, "format": "TV", "is_final": false}]}], "extras": [{"title": "Solo Leveling -ReAwakening-", "mal_id": 59841, "anilist_id": 184694, "format": "MOVIE", "start_date": {"year": 2024, "month": 11, "day": 29}}]}, "_timestamp": 1775086915.503556}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1 +0,0 @@
{"payload": {"entries": [], "season_groups": [], "extras": []}, "_timestamp": 1774915396.871178}

View File

@@ -1 +0,0 @@
{"payload": {"entries": [{"anilist_id": 171627, "mal_id": 57555, "title_romaji": "Chainsaw Man: Reze-hen", "title_english": "Chainsaw Man \u2013 The Movie: Reze Arc", "title_native": "\u30c1\u30a7\u30f3\u30bd\u30fc\u30de\u30f3 \u30ec\u30bc\u7bc7", "format": "MOVIE", "start_date": {"year": 2025, "month": 9, "day": 19}, "season": "SUMMER", "season_year": 2025, "episodes": 1, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Chainsaw Man \u2013 The Movie: Reze Arc", "_start_tuple": [2025, 9, 19], "is_final_season": false}, {"anilist_id": 204429, "mal_id": 63143, "title_romaji": "Chainsaw Man: Shikaku-hen", "title_english": null, "title_native": "\u30c1\u30a7\u30f3\u30bd\u30fc\u30de\u30f3 \u523a\u5ba2\u7bc7", "format": null, "start_date": {"year": null, "month": null, "day": null}, "season": null, "season_year": null, "episodes": null, "relation_type_from_parent": null, "inferred_part": null, "is_season": false, "is_final_from_title": false, "title_display": "Chainsaw Man: Shikaku-hen", "_start_tuple": [0, 0, 0], "is_final_season": false}, {"anilist_id": 127230, "mal_id": 44511, "title_romaji": "Chainsaw Man", "title_english": "Chainsaw Man", "title_native": "\u30c1\u30a7\u30f3\u30bd\u30fc\u30de\u30f3", "format": "TV", "start_date": {"year": 2022, "month": 10, "day": 12}, "season": "FALL", "season_year": 2022, "episodes": 12, "relation_type_from_parent": null, "inferred_part": null, "is_season": true, "is_final_from_title": false, "title_display": "Chainsaw Man", "_start_tuple": [2022, 10, 12], "is_final_season": false}], "season_groups": [{"season_index": 1, "group_label": "Season 1", "parts": [{"short_label": "S1", "title": "Chainsaw Man", "mal_id": 44511, "anilist_id": 127230, "start_date": {"year": 2022, "month": 10, "day": 12}, "format": "TV", "is_final": false}]}], "extras": [{"title": "Chainsaw Man: Shikaku-hen", "mal_id": 63143, "anilist_id": 204429, "format": null, "start_date": 
{"year": null, "month": null, "day": null}}, {"title": "Chainsaw Man \u2013 The Movie: Reze Arc", "mal_id": 57555, "anilist_id": 171627, "format": "MOVIE", "start_date": {"year": 2025, "month": 9, "day": 19}}]}, "_timestamp": 1775008693.3776262}

File diff suppressed because one or more lines are too long

View File

@@ -1 +0,0 @@
{"payload": {"mal_id": 121948, "url": "https://myanimelist.net/manga/121948/Dokusen_shitemo_Ii_desu_ka", "images": {"jpg": {"image_url": "https://myanimelist.net/images/manga/2/235219.jpg", "small_image_url": "https://myanimelist.net/images/manga/2/235219t.jpg", "large_image_url": "https://myanimelist.net/images/manga/2/235219l.jpg"}, "webp": {"image_url": "https://myanimelist.net/images/manga/2/235219.webp", "small_image_url": "https://myanimelist.net/images/manga/2/235219t.webp", "large_image_url": "https://myanimelist.net/images/manga/2/235219l.webp"}}, "approved": true, "titles": [{"type": "Default", "title": "Dokusen shitemo Ii desu ka"}, {"type": "Synonym", "title": "Will You Be Mine?"}, {"type": "Japanese", "title": "\u72ec\u5360\u3057\u3066\u3082\u3044\u3044\u3067\u3059\u304b"}, {"type": "English", "title": "Can I Have You All to Myself?"}], "title": "Dokusen shitemo Ii desu ka", "title_english": "Can I Have You All to Myself?", "title_japanese": "\u72ec\u5360\u3057\u3066\u3082\u3044\u3044\u3067\u3059\u304b", "title_synonyms": ["Will You Be Mine?"], "type": "Manga", "chapters": 7, "volumes": 1, "status": "Finished", "publishing": false, "published": {"from": "2017-04-28T00:00:00+00:00", "to": "2017-10-31T00:00:00+00:00", "prop": {"from": {"day": 28, "month": 4, "year": 2017}, "to": {"day": 31, "month": 10, "year": 2017}}, "string": "Apr 28, 2017 to Oct 31, 2017"}, "score": 6.74, "scored": 6.74, "scored_by": 136, "rank": null, "popularity": 32965, "members": 333, "favorites": 0, "synopsis": "Yuu and Ryou have been best friends since they were kids. Yuu is not the most approachable guy, unlike Ryou, who is very popular. It doesn't really bother Yuu that he's the outcast of the class, and he even admits that Ryou is the only person he feels comfortable talking to. But then, Ryou is asked to substitute for a player on the basketball team, meaning that he is not able to spend as much time with Yuu... 
Yuu is unhappy about this, and he cannot help but lash out at Ryou. Could Yuu's extremely possessive feelings be something more? Is their relationship really just about friendship?\n\n(Source: Renta!)", "background": "Dokusen shitemo Ii desu ka was published digitally in English as Can I Have You All to Myself? by Renta!", "authors": [{"mal_id": 52232, "type": "people", "name": "Kogamo", "url": "https://myanimelist.net/people/52232/Kogamo"}], "serializations": [], "genres": [{"mal_id": 28, "type": "manga", "name": "Boys Love", "url": "https://myanimelist.net/manga/genre/28/Boys_Love"}, {"mal_id": 49, "type": "manga", "name": "Erotica", "url": "https://myanimelist.net/manga/genre/49/Erotica"}], "explicit_genres": [], "themes": [{"mal_id": 23, "type": "manga", "name": "School", "url": "https://myanimelist.net/manga/genre/23/School"}], "demographics": [], "relations": [], "external": []}, "_timestamp": 1775087082.701801}

File diff suppressed because one or more lines are too long

View File

@@ -1,8 +1,8 @@
{ {
"name": "Comix Reader", "name": "Comix Reader",
"version": "1.0.6", "version": "1.0.7",
"author": "Animex", "author": "Animex",
"description": "Comix.to Manga Reader - Double-Safe Nested Data Parsing.", "description": "Comix.to Manga Reader - Fixed WAF validation, URL encoding, and Anti-Leech headers.",
"type": "MANGA_READER", "type": "MANGA_READER",
"requirements": ["httpx", "re", "json"] "requirements": ["httpx", "re", "json"]
} }
@@ -13,12 +13,14 @@ import urllib.parse
import inspect import inspect
import httpx import httpx
# Exact headers from your working test client # Extended headers to satisfy WAF & Anti-Leech checks
HEADERS = { HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
'Accept': 'application/json, text/plain, */*', 'Accept': 'application/json, text/plain, */*',
'Referer': 'https://comix.to/' 'Referer': 'https://comix.to/',
'X-Requested-With': 'XMLHttpRequest'
} }
async def _smart_fetch(method: str, url: str, **kwargs): async def _smart_fetch(method: str, url: str, **kwargs):
"""Uses injected HybridClient if available, otherwise falls back to real httpx.""" """Uses injected HybridClient if available, otherwise falls back to real httpx."""
client_or_lib = globals().get('httpx') client_or_lib = globals().get('httpx')
@@ -26,21 +28,14 @@ async def _smart_fetch(method: str, url: str, **kwargs):
func = getattr(client_or_lib, method.lower(), None) func = getattr(client_or_lib, method.lower(), None)
if func and inspect.iscoroutinefunction(func): if func and inspect.iscoroutinefunction(func):
resp = await func(url, **kwargs) resp = await func(url, **kwargs)
# Debug: dump raw response to see what we're actually getting
raw = getattr(resp, 'text', '') or ''
print(f"[Comix] _smart_fetch response preview: {raw[:300]}")
return resp return resp
# Fallback: real httpx.AsyncClient # Fallback: real httpx.AsyncClient
import httpx as _real_httpx import httpx as _real_httpx
async with _real_httpx.AsyncClient(follow_redirects=True) as client: async with _real_httpx.AsyncClient(follow_redirects=True) as client:
resp = await getattr(client, method.lower())(url, **kwargs) resp = await getattr(client, method.lower())(url, **kwargs)
print(f"[Comix] _smart_fetch (direct) response preview: {resp.text[:300]}")
return resp return resp
def get_nested(data, *keys, default=None): def get_nested(data, *keys, default=None):
"""Helper to safely traverse deeply nested dictionaries even if keys are None.""" """Helper to safely traverse deeply nested dictionaries even if keys are None."""
for key in keys: for key in keys:
@@ -65,15 +60,19 @@ async def get_title_from_mal(mal_id: int):
return None return None
async def search_manga(query: str): async def search_manga(query: str):
"""Searches Comix.to and returns (hash_id, slug).""" """Searches Comix.to and returns (manga_id, hash_id, slug)."""
if not query: return None if not query: return None
url = f"https://comix.to/api/v2/manga?keyword={urllib.parse.quote(query)}&order[relevance]=desc"
# Safely URL encode parameters (prevents WAF bracket blocks)
params = {"keyword": query, "order[relevance]": "desc"}
qs = urllib.parse.urlencode(params)
url = f"https://comix.to/api/v2/manga?{qs}"
try: try:
resp = await _smart_fetch("GET", url, headers=HEADERS) resp = await _smart_fetch("GET", url, headers=HEADERS)
print(f"[Comix] Search status: {resp.status_code}") print(f"[Comix] Search status: {getattr(resp, 'status_code', 500)}")
data = resp.json() if hasattr(resp, "json") else None data = resp.json() if hasattr(resp, "json") else None
if data is None: if data is None:
print(f"[Comix] Search response was not JSON. Raw: {resp.text[:200]}")
return None return None
# Safely get first item from result -> items # Safely get first item from result -> items
@@ -81,7 +80,8 @@ async def search_manga(query: str):
if items and isinstance(items, list) and len(items) > 0: if items and isinstance(items, list) and len(items) > 0:
first = items[0] first = items[0]
if isinstance(first, dict): if isinstance(first, dict):
return first.get('hash_id'), first.get('slug') # Return manga_id too for WAF API fallback purposes
return first.get('manga_id'), first.get('hash_id'), first.get('slug')
except Exception as e: except Exception as e:
print(f"[Comix] Search Error: {e}") print(f"[Comix] Search Error: {e}")
return None return None
@@ -91,30 +91,47 @@ async def get_chapters(mal_id: int):
print(f"[Comix] get_chapters called for MAL ID: {mal_id}") print(f"[Comix] get_chapters called for MAL ID: {mal_id}")
title = await get_title_from_mal(mal_id) title = await get_title_from_mal(mal_id)
print(f"[Comix] Resolved title: {title}")
if not title: return None if not title: return None
manga_info = await search_manga(title) manga_info = await search_manga(title)
print(f"[Comix] Search result: {manga_info}")
if not manga_info: return None if not manga_info: return None
hash_id, slug = manga_info manga_id, hash_id, slug = manga_info
# Paginate since API caps at 100 per request # Paginate since API caps at 100 per request
all_items = []
offset = 0
seen_ids = set()
all_items = [] all_items = []
page = 1 page = 1
while True: while True:
url = f"https://comix.to/api/v2/manga/{hash_id}/chapters?order[number]=asc&limit=100&page={page}" # Dynamically spoof the Referer for the specific manga to bypass Anti-Leech
resp = await _smart_fetch("GET", url, headers=HEADERS) req_headers = HEADERS.copy()
req_headers["Referer"] = f"https://comix.to/title/{hash_id}-{slug}"
# Safely encode the query string to prevent 403 Bracket Rejection
params = {
"order[number]": "asc",
"limit": 100,
"page": page
}
qs = urllib.parse.urlencode(params)
# Primary Try: Try requesting chapters using the Hash ID
url = f"https://comix.to/api/v2/manga/{hash_id}/chapters?{qs}&_=xQm9tJfLwGhz_0Eq8S_YAHYkwp-q1PLfm50W5QJnyd1NnNYpAjXjyCoAzoOLRgUaJOoxWS0NeDGz_rNrbqBjLLP1H9qi"
resp = await _smart_fetch("GET", url, headers=req_headers)
data = resp.json() if hasattr(resp, "json") else None data = resp.json() if hasattr(resp, "json") else None
# Fallback: If framework validation fails (400, 403, 404), it might be strictly
# expecting the internal integer primary key instead of the string hash.
if data and data.get("status") in [400, 403, 404]:
print(f"[Comix] Hash ID rejected ({data.get('status')}). Falling back to Integer Manga ID...")
url_fallback = f"https://comix.to/api/v2/manga/{manga_id}/chapters?{qs}&_=xQm9tJfLwGhz_0Eq8S_YAHYkwp-q1PLfm50W5QJnyd1NnNYpAjXjyCoAzoOLRgUaJOoxWS0NeDGz_rNrbqBjLLP1H9qi"
resp = await _smart_fetch("GET", url_fallback, headers=req_headers)
data = resp.json() if hasattr(resp, "json") else None
if data is None or data.get("status") != 200: if data is None or data.get("status") != 200:
print(f"[Comix] Bad response on page {page}: {data.get('message') if data else resp.text[:200]}") error_msg = data.get('message') if data else getattr(resp, 'text', '')[:200]
print(f"[Comix] Bad response on page {page}: {error_msg}")
break break
items = get_nested(data, 'result', 'items', default=[]) items = get_nested(data, 'result', 'items', default=[])
@@ -137,11 +154,15 @@ async def get_chapters(mal_id: int):
for item in all_items: for item in all_items:
if not isinstance(item, dict): continue if not isinstance(item, dict): continue
num = str(item.get('number', '0')) num = str(item.get('number', '0'))
# Safely fallback to any available ID key
c_id = item.get('chapter_id') or item.get('id') or item.get('hash_id')
if num not in seen_numbers: if num not in seen_numbers:
seen_numbers[num] = True seen_numbers[num] = True
formatted.append({ formatted.append({
"title": item.get('name') or f"Chapter {num}", "title": item.get('name') or f"Chapter {num}",
"url": f"{hash_id}:{slug}:{item.get('chapter_id')}", "url": f"{hash_id}:{slug}:{c_id}",
"chapter_number": num, "chapter_number": num,
"is_external": False "is_external": False
}) })
@@ -185,7 +206,11 @@ async def get_chapter_images(mal_id: int, chapter_num: str):
hash_id, slug, chapter_id = target_chapter["url"].split(":") hash_id, slug, chapter_id = target_chapter["url"].split(":")
url = f"https://comix.to/title/{hash_id}-{slug}/{chapter_id}-chapter-{chapter_num}" url = f"https://comix.to/title/{hash_id}-{slug}/{chapter_id}-chapter-{chapter_num}"
resp = await _smart_fetch("GET", url, headers=HEADERS) # Mirror the Referer just as we do for chapters
req_headers = HEADERS.copy()
req_headers["Referer"] = f"https://comix.to/title/{hash_id}-{slug}"
resp = await _smart_fetch("GET", url, headers=req_headers)
if not resp or not hasattr(resp, "text"): return None if not resp or not hasattr(resp, "text"): return None
regex = r'["\\]*images["\\]*\s*:\s*(\[[^\]]*\])' regex = r'["\\]*images["\\]*\s*:\s*(\[[^\]]*\])'

338
modules/weebcentral.module Normal file
View File

@@ -0,0 +1,338 @@
{
"name": "WeebCentral",
"version": "1.0.0",
"author": "Animex",
"description": "WeebCentral.com Manga Reader — uses HTMX scraping endpoints for chapters and pages.",
"type": "MANGA_READER",
"requirements": ["httpx", "beautifulsoup4", "re"]
}
---
import re
import inspect
import httpx as _httpx_lib
try:
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = None
# Root of the WeebCentral site; every endpoint below is derived from it.
BASE_URL = "https://weebcentral.com"
# Baseline desktop-browser headers attached to every request so the site
# serves the same HTML it would give a real browser.
BROWSER_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "Accept": "text/html, */*",
}
# ---------------------------------------------------------------------------
# Internal HTTP helpers
# ---------------------------------------------------------------------------
def _get_hybrid():
    """Return the injected HybridClient when one exists, otherwise None.

    The host app may inject a tunnel object under the module-global name
    ``httpx``; anything bound there that is not the real httpx library is
    treated as the hybrid client.
    """
    injected = globals().get("httpx")
    if injected is None or injected is _httpx_lib:
        return None
    return injected
async def _get(url, headers=None, timeout=20):
    """Perform a GET request.

    Routes through the injected HybridClient tunnel when it exposes an
    async ``get``; otherwise falls back to a one-shot real httpx client
    that follows redirects.
    """
    tunnel = _get_hybrid()
    if tunnel is not None:
        tunnel_get = getattr(tunnel, "get", None)
        if tunnel_get and inspect.iscoroutinefunction(tunnel_get):
            return await tunnel_get(url, headers=headers, timeout=timeout)
    async with _httpx_lib.AsyncClient(follow_redirects=True) as session:
        return await session.get(url, headers=headers, timeout=timeout)
async def _post_form(url, data: dict, extra_headers: dict = None, timeout=20):
    """
    Send a form-encoded POST.

    Always uses the real httpx client: the HybridClient tunnel only speaks
    JSON bodies, so routing form posts through it would break — going
    direct is intentional.
    """
    headers = dict(BROWSER_HEADERS)
    headers.update(extra_headers or {})
    headers["Content-Type"] = "application/x-www-form-urlencoded"
    async with _httpx_lib.AsyncClient(follow_redirects=True) as session:
        return await session.post(url, data=data, headers=headers, timeout=timeout)
def _parse_html(text: str):
    """Parse *text* into a BeautifulSoup tree using the stdlib html.parser."""
    if BeautifulSoup is not None:
        return BeautifulSoup(text, "html.parser")
    raise RuntimeError("[WeebCentral] beautifulsoup4 is not installed.")
# ---------------------------------------------------------------------------
# MAL title lookup
# ---------------------------------------------------------------------------
async def _get_mal_titles(mal_id: int):
    """Look up a manga on Jikan and return (romaji_title, english_title).

    Any failure — non-200 status, non-JSON body, or a raised exception —
    collapses to (None, None) so callers can simply move on.
    """
    url = f"https://api.jikan.moe/v4/manga/{mal_id}"
    try:
        resp = await _get(url)
        if getattr(resp, "status_code", 500) == 200:
            payload = resp.json() if hasattr(resp, "json") else None
            if payload:
                entry = payload.get("data", {})
                return entry.get("title"), entry.get("title_english")
    except Exception as e:
        print(f"[WeebCentral] MAL fetch error: {e}")
    return None, None
# ---------------------------------------------------------------------------
# WeebCentral search
# ---------------------------------------------------------------------------
async def _search(query: str):
    """
    Query WeebCentral's HTMX quick-search endpoint.

    Returns a list of {"id", "title", "url"} dicts whose titles loosely
    match *query* (either string contains the other, case-insensitively).
    Empty list on any failure or when *query* is falsy.
    """
    if not query:
        return []
    search_url = f"{BASE_URL}/search/simple?location=main"
    htmx_headers = {
        "HX-Request": "true",
        "HX-Trigger": "quick-search-input",
        "HX-Trigger-Name": "text",
        "HX-Target": "quick-search-result",
        "HX-Current-URL": f"{BASE_URL}/",
    }
    try:
        resp = await _post_form(search_url, data={"text": query}, extra_headers=htmx_headers)
        if getattr(resp, "status_code", 500) != 200:
            print(f"[WeebCentral] Search returned status {resp.status_code}")
            return []
        soup = _parse_html(resp.text)
        needle = query.lower()
        matches = []
        for anchor in soup.select("a"):
            href = anchor.get("href", "")
            if "/series/" not in href:
                continue
            id_match = re.search(r"/series/([^/]+)", href)
            if not id_match:
                continue
            title_el = anchor.select_one(".flex-1")
            title = title_el.get_text(strip=True) if title_el else "Unknown"
            haystack = title.lower()
            # Loose match: accept when either string contains the other.
            if needle in haystack or haystack in needle:
                matches.append({"id": id_match.group(1), "title": title, "url": href})
        return matches
    except Exception as e:
        print(f"[WeebCentral] Search error: {e}")
        return []
# ---------------------------------------------------------------------------
# Chapter list
# ---------------------------------------------------------------------------
async def _get_chapters_for_series(manga_id: str):
    """
    Fetch the full chapter list for a WeebCentral series.

    Returns [{"id", "title", "chapter_number"}, ...] in ascending order
    by chapter number (the site serves newest-first, so the parsed list
    is reversed). Empty list on any failure.
    """
    url = f"{BASE_URL}/series/{manga_id}/full-chapter-list"
    htmx_headers = {
        "HX-Request": "true",
        "HX-Target": "chapter-list",
        "HX-Current-URL": f"{BASE_URL}/series/{manga_id}",
        "Referer": f"{BASE_URL}/series/{manga_id}",
    }
    try:
        resp = await _get(url, headers={**BROWSER_HEADERS, **htmx_headers})
        if getattr(resp, "status_code", 500) != 200:
            return []
        soup = _parse_html(resp.text)
        parsed = []
        for row in soup.select("div.flex.items-center"):
            anchor = row.find("a")
            if not anchor:
                continue
            href = anchor.get("href", "")
            id_match = re.search(r"/chapters/([^/]+)", href)
            if not id_match:
                continue
            title_span = anchor.select_one("span.grow > span")
            title = title_span.get_text(strip=True) if title_span else ""
            # Pull "12" / "12.5" style numbers out of the display title.
            num_match = re.search(r"(\d+(?:\.\d+)?)", title)
            parsed.append({
                "id": id_match.group(1),
                "title": title,
                "chapter_number": num_match.group(1) if num_match else "0",
            })
        # The site lists newest first — flip to ascending order.
        parsed.reverse()
        return parsed
    except Exception as e:
        print(f"[WeebCentral] Chapter list error: {e}")
        return []
# ---------------------------------------------------------------------------
# Page images
# ---------------------------------------------------------------------------
async def _get_pages(chapter_id: str):
    """Return the ordered list of page-image URLs for a chapter, or []."""
    url = f"{BASE_URL}/chapters/{chapter_id}/images?is_prev=False&reading_style=long_strip"
    htmx_headers = {
        "HX-Request": "true",
        "HX-Current-URL": f"{BASE_URL}/chapters/{chapter_id}",
        "Referer": f"{BASE_URL}/chapters/{chapter_id}",
    }
    try:
        resp = await _get(url, headers={**BROWSER_HEADERS, **htmx_headers})
        if getattr(resp, "status_code", 500) != 200:
            return []
        soup = _parse_html(resp.text)
        # Prefer the reader section; fall back to every <img> on the page.
        img_tags = soup.select("section.flex-1 img") or soup.find_all("img")
        return [tag.get("src") for tag in img_tags if tag.get("src")]
    except Exception as e:
        print(f"[WeebCentral] Page fetch error: {e}")
        return []
# ---------------------------------------------------------------------------
# Public API — called by app.py
# ---------------------------------------------------------------------------
async def _resolve_series_id(mal_id: int):
    """Translate a MAL ID into a WeebCentral series ID, or None.

    Tries the English title first, then the romaji one, and returns the
    top hit of the first search that produces any results.
    """
    romaji, english = await _get_mal_titles(mal_id)
    candidates = [t for t in (english, romaji) if t]
    if not candidates:
        print("[WeebCentral] Could not resolve titles from MAL.")
        return None
    for title in candidates:
        hits = await _search(title)
        if not hits:
            print(f"[WeebCentral] No results for '{title}', trying next…")
            continue
        best = hits[0]
        print(f"[WeebCentral] Matched series '{best['title']}' (id={best['id']})")
        return best["id"]
    print(f"[WeebCentral] No series found for MAL ID {mal_id}.")
    return None
async def get_chapters(mal_id: int):
    """
    Module entry point for app.py /chapters/{mal_id}.

    Returns [{"title", "url", "chapter_number", "is_external"}, ...]
    sorted descending (newest first), or None when the series or its
    chapters cannot be resolved. The `url` field packs
    "wc:{series_id}:{chapter_id}" so get_chapter_images can reuse it
    without a second search round-trip.
    """
    print(f"[WeebCentral] get_chapters called — MAL {mal_id}")
    series_id = await _resolve_series_id(mal_id)
    if not series_id:
        return None
    raw = await _get_chapters_for_series(series_id)
    if not raw:
        return None
    formatted = [
        {
            "title": ch["title"],
            "url": f"wc:{series_id}:{ch['id']}",
            "chapter_number": ch["chapter_number"],
            "is_external": False,
        }
        for ch in raw
    ]
    # Newest first — matches the convention used by the comix module.
    formatted.sort(key=lambda entry: _safe_float(entry["chapter_number"]), reverse=True)
    return formatted
def _safe_float(v):
try:
return float(v)
except (ValueError, TypeError):
return 0.0
def _match_chapter(chapters, chapter_num):
    """Find the chapter matching *chapter_num*: numeric equality first,
    falling back to an exact string comparison for each entry."""
    try:
        wanted = float(chapter_num)
    except (ValueError, TypeError):
        wanted = None
    for ch in chapters:
        if wanted is not None:
            try:
                if float(ch["chapter_number"]) == wanted:
                    return ch
            except (ValueError, TypeError):
                pass
        if ch["chapter_number"] == str(chapter_num):
            return ch
    return None
async def get_chapter_images(mal_id: int, chapter_num: str):
    """
    Module entry point for app.py /retrieve/{mal_id}/{chapter_num}.

    Reuses get_chapters() — which already handles the MAL lookup and
    search — to locate the requested chapter, then returns its page
    image URLs as a list of strings, or None on any failure.
    """
    print(f"[WeebCentral] get_chapter_images called -- MAL {mal_id}, chapter {chapter_num}")
    # 1. Full chapter list (MAL lookup + search happen inside).
    chapters = await get_chapters(mal_id)
    if not chapters:
        print("[WeebCentral] Chapter list is empty.")
        return None
    # 2. Locate the requested chapter.
    target = _match_chapter(chapters, chapter_num)
    if not target:
        print(f"[WeebCentral] Chapter {chapter_num} not found.")
        return None
    # 3. Unpack the packed url field ("wc:{series_id}:{chapter_id}").
    try:
        _, _series_id, chapter_id = target["url"].split(":")
    except ValueError:
        print(f"[WeebCentral] Malformed url field: {target['url']}")
        return None
    print(f"[WeebCentral] Fetching pages for: {target['title']} (id={chapter_id})")
    # 4. Download the page list.
    pages = await _get_pages(chapter_id)
    print(f"[WeebCentral] Found {len(pages)} pages." if pages else "[WeebCentral] No pages extracted.")
    return pages if pages else None

225
test.py Normal file
View File

@@ -0,0 +1,225 @@
import requests
from bs4 import BeautifulSoup
import re
import logging
import json
import os
import time
# Configure Console Logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger("WeebCentralTest")
LOG_FILE = "responses_test.txt"
def log_to_file(url, method, status, req_headers, req_body, resp_text):
    """Append one full request/response transcript to LOG_FILE.

    Logging problems are printed but never raised, so a bad transcript
    cannot abort the test run.
    """
    banner = "="*100
    rule = "-" * 40
    try:
        with open(LOG_FILE, "a", encoding="utf-8") as f:
            f.write(banner + "\n")
            f.write(f"URL: {url}\n")
            f.write(f"METHOD: {method}\n")
            f.write(f"STATUS: {status}\n")
            f.write(rule + "\n")
            f.write("REQUEST HEADERS:\n")
            for key, value in req_headers.items():
                f.write(f"  {key}: {value}\n")
            if req_body:
                f.write(rule + "\n")
                f.write(f"REQUEST BODY: {req_body}\n")
            f.write(rule + "\n")
            f.write("RESPONSE BODY:\n")
            f.write(resp_text if resp_text else "[EMPTY BODY]")
            f.write("\n" + banner + "\n\n")
    except Exception as e:
        print(f"Logging error: {e}")
class WeebCentralProvider:
    """Scraping client for WeebCentral's HTMX endpoints.

    Keeps one requests.Session with browser-like headers and records
    every transaction via log_to_file for offline inspection.
    """
    def __init__(self):
        self.base_url = "https://weebcentral.com"
        self.session = requests.Session()
        self.session.headers.update({
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        })
    def get_mal_titles(self, mal_id):
        """Return (romaji, english) titles for a MAL manga ID via Jikan."""
        logger.info(f"Fetching MAL metadata for ID: {mal_id}")
        url = f"https://api.jikan.moe/v4/manga/{mal_id}"
        resp = requests.get(url)
        # Keep a transcript of the MAL call alongside the site traffic.
        log_to_file(url, "GET", resp.status_code, resp.request.headers, None, resp.text)
        if resp.status_code != 200:
            logger.error(f"MAL API failed with status {resp.status_code}")
            return None, None
        payload = resp.json().get('data', {})
        return payload.get('title'), payload.get('title_english')
    def search(self, query: str):
        """POST the HTMX quick-search form and return loosely-matching series."""
        if not query:
            return []
        logger.info(f"--- Searching WeebCentral for: '{query}' ---")
        search_url = f"{self.base_url}/search/simple?location=main"
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "HX-Request": "true",
            "HX-Trigger": "quick-search-input",
            "HX-Trigger-Name": "text",
            "HX-Target": "quick-search-result",
            "HX-Current-URL": f"{self.base_url}/",
        }
        data = {"text": query}
        response = self.session.post(search_url, headers=headers, data=data)
        log_to_file(search_url, "POST", response.status_code, {**self.session.headers, **headers}, data, response.text)
        if response.status_code != 200:
            return []
        soup = BeautifulSoup(response.text, "html.parser")
        found = []
        for anchor in soup.select("a"):
            link = anchor.get("href", "")
            if "/series/" not in link:
                continue
            title_el = anchor.select_one(".flex-1")
            title = title_el.get_text(strip=True) if title_el else "Unknown"
            id_match = re.search(r"/series/([^/]+)", link)
            manga_id = id_match.group(1) if id_match else None
            # Loose match: either string may contain the other.
            if manga_id and (query.lower() in title.lower() or title.lower() in query.lower()):
                found.append({"id": manga_id, "title": title, "url": link})
        return found
    def find_chapters(self, manga_id: str):
        """Fetch and parse the full chapter list, oldest chapter first."""
        logger.info(f"--- Finding Chapters for Manga ID: {manga_id} ---")
        chapter_url = f"{self.base_url}/series/{manga_id}/full-chapter-list"
        headers = {
            "HX-Request": "true",
            "HX-Target": "chapter-list",
            "HX-Current-URL": f"{self.base_url}/series/{manga_id}",
            "Referer": f"{self.base_url}/series/{manga_id}",
        }
        response = self.session.get(chapter_url, headers=headers)
        log_to_file(chapter_url, "GET", response.status_code, {**self.session.headers, **headers}, None, response.text)
        soup = BeautifulSoup(response.text, "html.parser")
        parsed = []
        for row in soup.select("div.flex.items-center"):
            a = row.find("a")
            if not a:
                continue
            href = a.get("href", "")
            title_span = a.select_one("span.grow > span")
            title = title_span.get_text(strip=True) if title_span else ""
            id_match = re.search(r"/chapters/([^/]+)", href)
            if not id_match:
                continue
            num_match = re.search(r"(\d+(?:\.\d+)?)", title)
            parsed.append({
                "id": id_match.group(1),
                "title": title,
                "chapter": num_match.group(1) if num_match else "0",
            })
        # The site lists newest first; flip to ascending order.
        parsed.reverse()
        return parsed
    def find_chapter_pages(self, chapter_id: str):
        """Return all page-image URLs for a chapter."""
        logger.info(f"--- Finding Pages for Chapter ID: {chapter_id} ---")
        url = f"{self.base_url}/chapters/{chapter_id}/images?is_prev=False&reading_style=long_strip"
        headers = {
            "HX-Request": "true",
            "HX-Current-URL": f"{self.base_url}/chapters/{chapter_id}",
            "Referer": f"{self.base_url}/chapters/{chapter_id}",
        }
        response = self.session.get(url, headers=headers)
        log_to_file(url, "GET", response.status_code, {**self.session.headers, **headers}, None, response.text)
        soup = BeautifulSoup(response.text, "html.parser")
        images = soup.select("section.flex-1 img") or soup.find_all("img")
        return [img.get("src") for img in images if img.get("src")]
def run_test():
    """End-to-end smoke test: MAL lookup → search → chapter list → pages."""
    TEST_MAL_ID = 23390  # Attack on Titan
    # Start from a clean transcript file.
    if os.path.exists(LOG_FILE):
        os.remove(LOG_FILE)
    provider = WeebCentralProvider()
    # 1. Resolve titles from MAL.
    romaji, english = provider.get_mal_titles(TEST_MAL_ID)
    if not romaji and not english:
        logger.error("Could not fetch titles from MAL.")
        return
    # 2. Search — English title preferred, romaji as fallback.
    search_results = []
    for title in (english, romaji):
        if not title:
            continue
        search_results = provider.search(title)
        if search_results:
            break
        logger.warning(f"No results for '{title}', trying next title...")
    if not search_results:
        logger.error(f"Failed to find manga on WeebCentral for ID {TEST_MAL_ID}. Check {LOG_FILE}")
        return
    target = search_results[0]
    logger.info(f"Matched: {target['title']} (ID: {target['id']})")
    # 3. Chapter list.
    chapters = provider.find_chapters(target['id'])
    if not chapters:
        logger.error("Chapter list extraction failed.")
        return
    logger.info(f"Found {len(chapters)} chapters. Fetching pages for last chapter: {chapters[-1]['title']}")
    # 4. Page extraction for the newest chapter.
    pages = provider.find_chapter_pages(chapters[-1]['id'])
    if pages:
        logger.info(f"SUCCESS! Found {len(pages)} pages.")
        logger.info(f"Full transaction logs: {os.path.abspath(LOG_FILE)}")
    else:
        logger.error("Failed to extract pages.")
if __name__ == "__main__":
run_test()