commit ff043331a2580a1adb65fa3ba200abbecb2d4658
Author: Bùi Thành Nhân <hi@imnhan.com>
Date:   Sun Aug 9 10:56:03 2020 +0700

    base Site class, add mangasee placeholder

diff --git a/pyproject.toml b/pyproject.toml
index 68b798e..2187bf6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "pytaku"
-version = "0.2.12"
+version = "0.2.13"
 description = ""
 authors = ["Bùi Thành Nhân <hi@imnhan.com>"]
 license = "AGPL-3.0-only"
diff --git a/src/mangoapi/__init__.py b/src/mangoapi/__init__.py
index cbbc5d1..8fc7490 100644
--- a/src/mangoapi/__init__.py
+++ b/src/mangoapi/__init__.py
@@ -1,118 +1,17 @@
-import re
+from .mangadex import Mangadex
+from .mangasee import Mangasee
 
-import requests
+"""
+The mangoapi package is designed to be self-contained, as if it were an external library.
+Each Site object represents a user session on a source site.
+Instantiating and managing Site objects is the responsibility of the caller.
+"""
 
-# Titles regex slightly adapted from https://github.com/md-y/mangadex-full-api
-# Thanks!
-TITLES_PATTERN = re.compile(
-    r"""<a[^>]*href=["']\/title\/(\d+)\/\S+["'][^>]*manga_title[^>]*>([^<]*)<"""
-)
+SITES = {
+    "mangadex": Mangadex,
+    "mangasee": Mangasee,
+}
 
 
-def _parse_chapter_number(string):
-    if string == "":
-        # most likely a oneshot
-        return {"number": ""}
-    nums = string.split(".")
-    count = len(nums)
-    assert count == 1 or count == 2
-    result = {"number": string}
-    result["num_major"] = int(nums[0])
-    if count == 2:
-        result["num_minor"] = int(nums[1])
-    return result
-
-
-def _extract_groups(chap):
-    return [
-        group.strip()
-        for group in [chap["group_name"], chap["group_name_2"], chap["group_name_3"]]
-        if group
-    ]
-
-
-def get_title(title_id):
-    url = f"https://mangadex.org/api/?id={title_id}&type=manga"
-    md_resp = requests.get(url)
-    assert md_resp.status_code == 200, md_resp.text
-    md_json = md_resp.json()
-    assert md_json["status"] == "OK"
-
-    cover = md_json["manga"]["cover_url"].split("/")[-1]
-    cover_ext = cover[cover.find(".") + 1 : cover.rfind("?")]
-
-    title = {
-        "id": title_id,
-        "name": md_json["manga"]["title"],
-        "cover_ext": cover_ext,
-        "alt_names": md_json["manga"]["alt_names"],
-        "descriptions": md_json["manga"]["description"].split("\r\n\r\n"),
-        "chapters": [
-            {
-                "id": str(chap_id),
-                "name": chap["title"],
-                "volume": int(chap["volume"]) if chap["volume"] else None,
-                "groups": _extract_groups(chap),
-                **_parse_chapter_number(chap["chapter"]),
-            }
-            for chap_id, chap in md_json.get("chapter", {}).items()
-            if chap["lang_code"] == "gb" and chap["group_name"] != "MangaPlus"
-        ],
-    }
-    return title
-
-
-def get_chapter(chapter_id):
-    md_resp = requests.get(
-        f"https://mangadex.org/api/?id={chapter_id}&type=chapter&saver=0"
-    )
-    assert md_resp.status_code == 200, md_resp.text
-    md_json = md_resp.json()
-    assert md_json["status"] == "OK"
-
-    server = md_json.get("server_fallback") or md_json["server"]
-    img_path = f"{server}{md_json['hash']}"
-
-    chapter = {
-        "id": chapter_id,
-        "title_id": md_json["manga_id"],
-        "name": md_json["title"],
-        "pages": [f"{img_path}/{page}" for page in md_json["page_array"]],
-        "groups": _extract_groups(md_json),
-        "is_webtoon": md_json["long_strip"] == 1,
-        **_parse_chapter_number(md_json["chapter"]),
-    }
-    return chapter
-
-
-def login(username, password):
-    """
-    Returns cookies of a logged in user.
-    """
-    form_data = {
-        "login_username": username,
-        "login_password": password,
-        "two_factor": "",
-        "remember_me": "1",
-    }
-    md_resp = requests.post(
-        "https://mangadex.org/ajax/actions.ajax.php?function=login",
-        data=form_data,
-        headers={"X-Requested-With": "XMLHttpRequest"},
-    )
-    assert md_resp.status_code == 200, md_resp.text
-    return dict(md_resp.cookies)
-
-
-def search_title(user_cookies, query):
-    md_resp = requests.get(
-        f"https://mangadex.org/quick_search/{query}", cookies=user_cookies,
-    )
-    assert md_resp.status_code == 200, md_resp.text
-
-    matches = TITLES_PATTERN.findall(md_resp.text)
-    titles = [
-        {"id": int(id), "name": name.strip(), "site": "mangadex"}
-        for id, name in matches
-    ]
-    return titles
+def get_site_class(name):
+    return SITES.get(name)
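
As the new module docstring notes, instantiating and managing Site objects is the caller's job. A minimal sketch of that flow, assuming hypothetical credentials (the account and query below are illustrative, not part of this commit):

    from mangoapi import get_site_class

    site_class = get_site_class("mangadex")    # looks up the class in SITES; None for unknown names
    site = site_class()                        # the caller owns this instance and its session state
    site.username = "example_user"             # hypothetical account, only needed for
    site.password = "example_pass"             # methods wrapped in @requires_login
    print(site.search_title("one piece"))      # [{"id": ..., "name": ..., "site": "mangadex"}, ...]
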
diff --git a/src/mangoapi/base_site.py b/src/mangoapi/base_site.py
new file mode 100644
index 0000000..79fa6d1
--- /dev/null
+++ b/src/mangoapi/base_site.py
@@ -0,0 +1,42 @@
+import functools
+from abc import ABC, abstractmethod
+
+
+class Site(ABC):
+    def __init__(self):
+        self._cookies = None
+        self.username = None
+        self.password = None
+
+    @abstractmethod
+    def get_title(self, title_id):
+        pass
+
+    @abstractmethod
+    def get_chapter(self, chapter_id):
+        pass
+
+    @abstractmethod
+    def search_title(self, query):
+        pass
+
+    # Optional: only sites that require login need to override this.
+    def login(self, username, password):
+        raise NotImplementedError()
+
+
+def requires_login(func):
+    """
+    Decorator for a Site's instance methods.
+    It makes sure login cookies are ready (logging in on first use) before running the method.
+    """
+
+    @functools.wraps(func)
+    def wrapper(self, *args, **kwargs):
+        if self._cookies is None:
+            assert self.username
+            assert self.password
+            self._cookies = self.login(self.username, self.password)
+        return func(self, *args, **kwargs)
+
+    return wrapper
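
The requires_login decorator logs in lazily the first time a wrapped method runs and caches the resulting cookies on the instance. A rough sketch of a concrete subclass using it (ExampleSite and its return values are made up for illustration; the real Mangadex implementation follows below):

    from mangoapi.base_site import Site, requires_login

    class ExampleSite(Site):
        def get_title(self, title_id): ...
        def get_chapter(self, chapter_id): ...

        def login(self, username, password):
            # a real site would POST credentials here and return its session cookies
            return {"session": "example-cookie"}

        @requires_login
        def search_title(self, query):
            # by the time this body runs, self._cookies has been populated by login()
            return [{"id": 1, "name": query, "site": "examplesite"}]
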
diff --git a/src/mangoapi/mangadex.py b/src/mangoapi/mangadex.py
new file mode 100644
index 0000000..c78103b
--- /dev/null
+++ b/src/mangoapi/mangadex.py
@@ -0,0 +1,120 @@
+import re
+
+import requests
+
+from mangoapi.base_site import Site, requires_login
+
+
+class Mangadex(Site):
+    def get_title(self, title_id):
+        url = f"https://mangadex.org/api/?id={title_id}&type=manga"
+        md_resp = requests.get(url)
+        assert md_resp.status_code == 200, md_resp.text
+        md_json = md_resp.json()
+        assert md_json["status"] == "OK"
+
+        cover = md_json["manga"]["cover_url"].split("/")[-1]
+        cover_ext = cover[cover.find(".") + 1 : cover.rfind("?")]
+
+        title = {
+            "id": title_id,
+            "name": md_json["manga"]["title"],
+            "cover_ext": cover_ext,
+            "alt_names": md_json["manga"]["alt_names"],
+            "descriptions": md_json["manga"]["description"].split("\r\n\r\n"),
+            "chapters": [
+                {
+                    "id": str(chap_id),
+                    "name": chap["title"],
+                    "volume": int(chap["volume"]) if chap["volume"] else None,
+                    "groups": _extract_groups(chap),
+                    **_parse_chapter_number(chap["chapter"]),
+                }
+                for chap_id, chap in md_json.get("chapter", {}).items()
+                if chap["lang_code"] == "gb" and chap["group_name"] != "MangaPlus"
+            ],
+        }
+        return title
+
+    def get_chapter(self, chapter_id):
+        md_resp = requests.get(
+            f"https://mangadex.org/api/?id={chapter_id}&type=chapter&saver=0"
+        )
+        assert md_resp.status_code == 200, md_resp.text
+        md_json = md_resp.json()
+        assert md_json["status"] == "OK"
+
+        server = md_json.get("server_fallback") or md_json["server"]
+        img_path = f"{server}{md_json['hash']}"
+
+        chapter = {
+            "id": chapter_id,
+            "title_id": md_json["manga_id"],
+            "name": md_json["title"],
+            "pages": [f"{img_path}/{page}" for page in md_json["page_array"]],
+            "groups": _extract_groups(md_json),
+            "is_webtoon": md_json["long_strip"] == 1,
+            **_parse_chapter_number(md_json["chapter"]),
+        }
+        return chapter
+
+    @requires_login
+    def search_title(self, query):
+        md_resp = requests.get(
+            f"https://mangadex.org/quick_search/{query}", cookies=self._cookies,
+        )
+        assert md_resp.status_code == 200, md_resp.text
+
+        matches = TITLES_PATTERN.findall(md_resp.text)
+        titles = [
+            {"id": int(id), "name": name.strip(), "site": "mangadex"}
+            for id, name in matches
+        ]
+        return titles
+
+    def login(self, username, password):
+        """
+        Returns cookies of a logged-in user.
+        """
+        form_data = {
+            "login_username": username,
+            "login_password": password,
+            "two_factor": "",
+            "remember_me": "1",
+        }
+        md_resp = requests.post(
+            "https://mangadex.org/ajax/actions.ajax.php?function=login",
+            data=form_data,
+            headers={"X-Requested-With": "XMLHttpRequest"},
+        )
+        assert md_resp.status_code == 200, md_resp.text
+        return dict(md_resp.cookies)
+
+
+# Titles regex slightly adapted from https://github.com/md-y/mangadex-full-api
+# Thanks!
+TITLES_PATTERN = re.compile(
+    r"""<a[^>]*href=["']\/title\/(\d+)\/\S+["'][^>]*manga_title[^>]*>([^<]*)<"""
+)
+
+
+def _parse_chapter_number(string):
+    if string == "":
+        # most likely a oneshot
+        return {"number": ""}
+    nums = string.split(".")
+    count = len(nums)
+    assert count == 1 or count == 2
+    result = {"number": string}
+    result["num_major"] = int(nums[0])
+    if count == 2:
+        result["num_minor"] = int(nums[1])
+    return result
+
+
+def _extract_groups(chap):
+    return [
+        group.strip()
+        for group in [chap["group_name"], chap["group_name_2"], chap["group_name_3"]]
+        if group
+    ]
diff --git a/src/mangoapi/mangasee.py b/src/mangoapi/mangasee.py
new file mode 100644
index 0000000..0cfd85c
--- /dev/null
+++ b/src/mangoapi/mangasee.py
@@ -0,0 +1,12 @@
+from mangoapi.base_site import Site
+
+
+class Mangasee(Site):
+    def get_title(self, title_id):
+        pass
+
+    def get_chapter(self, chapter_id):
+        pass
+
+    def search_title(self, query):
+        return []
diff --git a/src/pytaku/main.py b/src/pytaku/main.py
index 11181ac..2642a42 100644
--- a/src/pytaku/main.py
+++ b/src/pytaku/main.py
@@ -13,9 +13,6 @@
     url_for,
 )
 
-from mangoapi import get_chapter, get_title, search_title
-
-from . import mangadex
 from .conf import config
 from .decorators import ensure_session_version, require_login, toggle_has_read
 from .persistence import (
@@ -30,6 +27,7 @@
     unfollow,
     verify_username_password,
 )
+from .source_sites import get_chapter, get_title, search_title_all_sites
 
 config.load()
 
@@ -210,11 +208,10 @@ def chapter_view(site, chapter_id):
 @ensure_session_version
 def search_view():
     query = request.args.get("q", "").strip()
-    titles = []
+    results = {}
     if query:
-        cookies = mangadex.get_cookies()
-        titles = search_title(cookies, query)
-    return render_template("search.html", titles=titles, query=query)
+        results = search_title_all_sites(query)
+    return render_template("search.html", results=results, query=query)
 
 
 @app.route("/proxy/<b64_url>")
diff --git a/src/pytaku/mangadex.py b/src/pytaku/mangadex.py
deleted file mode 100644
index f231611..0000000
--- a/src/pytaku/mangadex.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from mangoapi import login
-from pytaku.conf import config
-
-_cookies = None
-
-
-def get_cookies():
-    global _cookies
-    if _cookies is None:
-        print("Logging in to mangadex")
-        _cookies = login(config.MANGADEX_USERNAME, config.MANGADEX_PASSWORD)
-    else:
-        print("Reusing mangadex cookies")
-    return _cookies
diff --git a/src/pytaku/scheduler.py b/src/pytaku/scheduler.py
index 69aa702..24cbaab 100644
--- a/src/pytaku/scheduler.py
+++ b/src/pytaku/scheduler.py
@@ -1,9 +1,8 @@
 import time
 from datetime import datetime, timedelta
 
-from mangoapi import get_title
-
 from .persistence import find_outdated_titles, save_title
+from .source_sites import get_title
 
 now = datetime.now
 
diff --git a/src/pytaku/source_sites.py b/src/pytaku/source_sites.py
new file mode 100644
index 0000000..a3cc247
--- /dev/null
+++ b/src/pytaku/source_sites.py
@@ -0,0 +1,47 @@
+from mangoapi import get_site_class
+
+from .conf import config
+
+"""
+This module adapts mangoapi's API into a more convenient one for app-wide use.
+All state (Site instances and their login cookies) is kept here, exposing only a functional API to the rest of the app.
+"""
+
+_site_objs = {}
+
+
+def _get_site(name):
+    global _site_objs
+    site = _site_objs.get(name)
+    if not site:
+        site_class = get_site_class(name)
+        assert site_class is not None
+        site = site_class()
+        if name == "mangadex":
+            site.username = config.MANGADEX_USERNAME
+            site.password = config.MANGADEX_PASSWORD
+        _site_objs[name] = site  # cache the instance so login cookies are reused across calls
+    return site
+
+
+def get_chapter(site_name, chapter_id):
+    return _get_site(site_name).get_chapter(chapter_id)
+
+
+def get_title(site_name, title_id):
+    return _get_site(site_name).get_title(title_id)
+
+
+def search_title(site_name, query):
+    return _get_site(site_name).search_title(query)
+
+
+def search_title_all_sites(query):
+    """
+    Returns a dict in the form of {site_name: List[Title]}.
+    I should really look into proper type annotations huh.
+    """
+    return {
+        site_name: search_title(site_name, query)
+        for site_name in ("mangasee", "mangadex")
+    }
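
With this adapter module in place, the rest of the app calls plain functions and never touches Site objects or cookies directly. For example, search_title_all_sites returns a per-site mapping shaped roughly like this (the values shown are illustrative):

    from pytaku.source_sites import search_title_all_sites

    results = search_title_all_sites("one piece")
    # {
    #     "mangasee": [],  # the Mangasee placeholder currently returns no results
    #     "mangadex": [{"id": 123, "name": "One Piece", "site": "mangadex"}, ...],
    # }
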
diff --git a/src/pytaku/templates/search.html b/src/pytaku/templates/search.html
index d0dd325..c38f548 100644
--- a/src/pytaku/templates/search.html
+++ b/src/pytaku/templates/search.html
@@ -10,6 +10,9 @@
 
 {% block head %}
 <style>
+  .site-heading {
+    text-transform: capitalize;
+  }
   .results {
     display: flex;
     flex-direction: row;
@@ -29,6 +32,10 @@
     width: 0;
     min-width: 100%;
   }
+
+  .result-text {
+    margin-bottom: 1rem;
+  }
 </style>
 {% endblock %}
 
@@ -38,10 +45,14 @@
   <h1>Please enter a search query above.</h1>
 
 {% else %}
+{% for site, titles in results.items() %}
+<div>
+  <h1 class="site-heading">{{ site }}</h1>
+
   {% if titles %}
-  <h1>Showing {{ titles | length }} result(s) for "{{ query }}":</h1>
+  <h2 class="result-text">Showing <strong>{{ titles | length }}</strong> result(s) for "{{ query }}":</h2>
   {% else %}
-  <h1>No results for "{{ query }}".</h1>
+  <h2 class="result-text">No results for "{{ query }}".</h2>
   {% endif %}
 
   <div class="results">
@@ -53,7 +64,8 @@ <h1>No results for "{{ query }}".</h1>
     </a>
   {% endfor %}
   </div>
-
+</div>
+{% endfor %}
 {% endif %}