commit cc7cb80bfe27b35d467b32cf6532442e9da70f75
Author: Bùi Thành Nhân <hi@imnhan.com>
Date: Fri Jan 22 13:18:03 2021 +0700
use mangadex api v2
The v2 API removed the chapter's `long_strip` field for whatever reason.
Nagging for it on Discord.
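For reference, rough sketches of the two v2 payloads the new code reads, written as Python literals. Only the fields the diff below actually touches are shown; the overall shape and the example values are approximations, not the authoritative schema.

    # GET https://mangadex.org/api/v2/manga/{title_id}?include=chapters
    # (approximate; only the fields get_title() uses)
    title_resp = {
        "status": "OK",
        "data": {
            "manga": {
                "title": "Some Manga",
                "mainCover": "https://mangadex.org/images/manga/123.jpg?v=abc",
                "altTitles": ["Alternative Name"],
                "description": "First paragraph\r\n\r\nSecond paragraph",
            },
            "chapters": [
                {
                    "id": 456,
                    "title": "Oneshot",
                    "volume": "",
                    "chapter": "1",
                    "language": "gb",
                    "groups": [9097],         # group ids only; names live under "groups" below
                    "timestamp": 1611295083,  # can be in the future for delayed releases
                },
            ],
            "groups": [{"id": 9097, "name": "MangaPlus"}],
        },
    }

    # GET https://mangadex.org/api/v2/chapter/{chapter_id}?saver=0
    # (approximate; only the fields get_chapter() uses)
    chapter_resp = {
        "status": "OK",
        "data": {
            "mangaId": 123,
            "title": "Oneshot",
            "chapter": "1",
            "hash": "deadbeef",
            "server": "https://s5.mangadex.org/data/",
            # "serverFallback" shows up only when "server" is an ephemeral MD@H
            # node; it then points back to MD's own server.
            "pages": ["x1.png", "x2.png"],
            "groups": [{"id": 12, "name": "Some Scanlation Group"}],
        },
    }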
diff --git a/pyproject.toml b/pyproject.toml
index 70e0a4f..23e9315 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "pytaku"
-version = "0.3.31"
+version = "0.3.32"
description = "Self-hostable web-based manga reader"
authors = ["Bùi Thành Nhân <hi@imnhan.com>"]
license = "AGPL-3.0-only"
diff --git a/src/mangoapi/mangadex.py b/src/mangoapi/mangadex.py
index da2f705..336bdfd 100644
--- a/src/mangoapi/mangadex.py
+++ b/src/mangoapi/mangadex.py
@@ -4,39 +4,46 @@
from mangoapi.base_site import Site, requires_login
+MANGAPLUS_GROUP_ID = 9097  # MangaDex group id for MangaPlus uploads, filtered out in get_title()
+
class Mangadex(Site):
def get_title(self, title_id):
- url = f"https://mangadex.org/api/?id={title_id}&type=manga"
+ url = f"https://mangadex.org/api/v2/manga/{title_id}?include=chapters"
md_resp = self.http_get(url)
md_json = md_resp.json()
assert md_json["status"] == "OK"
+ manga = md_json["data"]["manga"]
+ chapters = md_json["data"]["chapters"]
+ groups = md_json["data"]["groups"]
+ groups_dict = {group["id"]: group["name"] for group in groups}
- cover = md_json["manga"]["cover_url"].split("/")[-1]
+ cover = manga["mainCover"].split("/")[-1]
cover_ext = cover[cover.find(".") + 1 : cover.rfind("?")]
current_timestamp = time.time()
title = {
"id": title_id,
- "name": md_json["manga"]["title"],
+ "name": manga["title"],
"site": "mangadex",
"cover_ext": cover_ext,
- "alt_names": md_json["manga"]["alt_names"],
- "descriptions": html.unescape(md_json["manga"]["description"]).split(
- "\r\n\r\n"
- ),
+ "alt_names": manga["altTitles"],
+ "descriptions": html.unescape(manga["description"]).split("\r\n\r\n"),
"chapters": [
{
- "id": str(chap_id),
+ "id": str(chap["id"]),
"name": chap["title"],
"volume": int(chap["volume"]) if chap["volume"] else None,
- "groups": _extract_groups(chap),
+ "groups": [
+ html.unescape(groups_dict[group_id])
+ for group_id in chap["groups"]
+ ],
**_parse_chapter_number(chap["chapter"]),
}
- for chap_id, chap in md_json.get("chapter", {}).items()
- if chap["lang_code"] == "gb"
- and chap["group_name"] != "MangaPlus"
+ for chap in chapters
+ if chap["language"] == "gb"
+ and MANGAPLUS_GROUP_ID not in chap["groups"]
and chap["timestamp"] <= current_timestamp
# ^ Chapter may be listed but with access delayed for a certain amount
# of time set by uploader, in which case we just filter it out. God I
@@ -47,45 +54,44 @@ def get_title(self, title_id):
def get_chapter(self, title_id, chapter_id):
md_resp = self.http_get(
- f"https://mangadex.org/api/?id={chapter_id}&type=chapter&saver=0"
+ f"https://mangadex.org/api/v2/chapter/{chapter_id}?saver=0"
)
md_json = md_resp.json()
assert md_json["status"] == "OK"
+ data = md_json["data"]
# 2 cases:
- # - If 'server_fallback' is absent, it means 'server' points to MD's own server
+ # - If 'serverFallback' is absent, it means 'server' points to MD's own server
# e.g. s5.mangadex.org...
# - Otherwise, 'server' points to a likely ephemeral MD@H node, while
- # 'server_fallback' now points to MD's own server.
+ # 'serverFallback' now points to MD's own server.
#
# MD's own links apparently go dead sometimes, but MD@H links seem to expire
# quickly all the time, so it's probably a good idea to store both anyway.
- server_fallback = md_json.get("server_fallback")
+ server_fallback = data.get("serverFallback")
if server_fallback:
md_server = server_fallback
- mdah_server = md_json["server"]
+ mdah_server = data["server"]
else:
- md_server = md_json["server"]
+ md_server = data["server"]
mdah_server = None
chapter = {
"id": chapter_id,
- "title_id": str(md_json["manga_id"]),
+ "title_id": str(data["mangaId"]),
"site": "mangadex",
- "name": md_json["title"],
- "pages": [
- f"{md_server}{md_json['hash']}/{page}" for page in md_json["page_array"]
- ],
+ "name": data["title"],
+ "pages": [f"{md_server}{data['hash']}/{page}" for page in data["pages"]],
"pages_alt": [
- f"{mdah_server}{md_json['hash']}/{page}"
- for page in md_json["page_array"]
+ f"{mdah_server}{data['hash']}/{page}" for page in data["pages"]
]
if mdah_server
else [],
- "groups": _extract_groups(md_json),
- "is_webtoon": md_json["long_strip"] == 1,
- **_parse_chapter_number(md_json["chapter"]),
+ "groups": [html.unescape(group["name"]) for group in data["groups"]],
+ # TODO: longStrip doesn't exist in v2 API yet. Nagging for it on Discord.
+ "is_webtoon": bool(data.get("longStrip")),
+ **_parse_chapter_number(data["chapter"]),
}
return chapter
@@ -148,11 +154,3 @@ def _parse_chapter_number(string):
if count == 2:
result["num_minor"] = int(nums[1])
return result
-
-
-def _extract_groups(chap):
- return [
- html.unescape(group.strip())
- for group in [chap["group_name"], chap["group_name_2"], chap["group_name_3"]]
- if group
- ]
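The server/serverFallback handling in get_chapter() boils down to the standalone sketch below. The helper name is made up for illustration; the URL concatenation mirrors the f-strings above and assumes the server values already end with the path prefix (e.g. ".../data/").

    # Hypothetical helper, not part of the commit: mirrors get_chapter()'s
    # serverFallback logic for building primary and alternate page URLs.
    def build_page_urls(data):
        fallback = data.get("serverFallback")
        if fallback:
            md_server = fallback          # MD's own (more durable) server
            mdah_server = data["server"]  # ephemeral MD@H node
        else:
            md_server = data["server"]    # already MD's own server
            mdah_server = None

        pages = [f"{md_server}{data['hash']}/{page}" for page in data["pages"]]
        pages_alt = (
            [f"{mdah_server}{data['hash']}/{page}" for page in data["pages"]]
            if mdah_server
            else []
        )
        return pages, pages_alt

With the chapter_resp sketch from the commit message above, serverFallback is absent, so pages land on s5.mangadex.org and pages_alt comes back empty.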