diff --git a/assets/docs/cmd.html b/assets/docs/cmd.html index efd3572..2a2cfe3 100644 --- a/assets/docs/cmd.html +++ b/assets/docs/cmd.html @@ -36,6 +36,7 @@ .meta { font-size:12px; color:var(--muted); display:flex; gap:10px; flex-wrap:wrap; margin-top:4px; } .pill { display:inline-block; padding:2px 8px; border-radius:999px; font-size:12px; border:1px solid #2b4; } .pill.mod { border-color:#ef4444; color:#fecaca; } + .pill.admin { border-color:#a78bfa; color:#e9d5ff; } .pill.slash { border-color:#60a5fa; } .pill.prefix { border-color:#f59e0b; } .pill.hybrid { border-color:#34d399; } @@ -339,7 +340,7 @@ fdbtn?.addEventListener('click', closeFullDetails); const backdrop=document.getElementById('backdrop'); function shownName(r){ return (r.display_name||r.name||'').replace(/^\//,''); } - function helpSansMod(r){ return (r.help||'').replace(/^\s*\[MOD\]\s*/i,''); } + function helpSansMod(r){ return (r.help||'').replace(/^\s*\[(MOD|ADMIN)\]\s*/i,''); } function moduleSansPrefix(r){ const m=r.module||''; return m.replace(/^modules?\./,'').replace(/^discord\.ext\./,''); } async function shareFor(r){ @@ -375,6 +376,7 @@ fdbtn?.addEventListener('click', closeFullDetails);
${r.type} ${r.moderator_only?'mod':''} + ${r.admin_only?'admin':''} ${shownName(r)}
@@ -445,6 +447,7 @@ fdbtn?.addEventListener('click', closeFullDetails);
${r.type} ${r.moderator_only?'mod':''} + ${r.admin_only?'admin':''} ${shownName(r)}
diff --git a/assets/docs/commands/DDLootTableCog.dd_update.brief.html b/assets/docs/commands/DDLootTableCog.dd_update.brief.html new file mode 100644 index 0000000..7d26751 --- /dev/null +++ b/assets/docs/commands/DDLootTableCog.dd_update.brief.html @@ -0,0 +1,22 @@ +

/dd_update

Control the Deep Desert weekly loot updater. Stop/resume the scheduler or force a one-off start.

Usage
/dd_update <action> [reason]
  • action — one of stop, resume, start.
  • reason (optional) — short note shown in the confirmation.

Permissions
  • stop / resume: Server Owner or members with Manage Server.
  • start: Server Owner only (dangerous; bypasses the usual wait).

What it does
  • stop — pauses all checks until resume.
  • resume — returns to the normal weekly cycle.
  • start — behaves as if the weekly reset just happened and begins polling immediately.
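
The permission split above maps onto a small gate in DDLootTableCog.dd_update; a minimal standalone sketch of that check (the helper name here is illustrative):

import discord

def dd_update_allowed(interaction: discord.Interaction, action: str) -> bool:
    """start is owner-only; stop/resume accept the guild owner or Manage Server."""
    guild = interaction.guild
    if guild is None:
        return False
    is_owner = interaction.user.id == guild.owner_id
    if action == "start":
        return is_owner
    perms = getattr(interaction.user, "guild_permissions", None)
    return is_owner or bool(perms and perms.manage_guild)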
diff --git a/assets/docs/commands/DDLootTableCog.dd_update.details.html b/assets/docs/commands/DDLootTableCog.dd_update.details.html new file mode 100644 index 0000000..c4fb50e --- /dev/null +++ b/assets/docs/commands/DDLootTableCog.dd_update.details.html @@ -0,0 +1,50 @@ +

/dd_update — Deep Desert updater controls

Manage the weekly “Deep Desert — Weekly Uniques” message updater for this guild.

Usage
/dd_update <action> [reason]
  • action — stop | resume | start
  • reason (optional) — free text appended to the confirmation response.

Permissions
  • stop / resume: Server Owner or members with Manage Server.
  • start: Server Owner only. Use with care—this forces an immediate check cycle.

Behavior
  • Weekly reset: Tuesdays 03:00 UTC. At reset, the bot updates the channel message to indicate it’s waiting for the new week.
  • Polling cadence: adaptive back-off until fresh data is found (see the code sketch below):
    • Every 5 min for the first hour
    • Then every 15 min until 3 hours
    • Then every 30 min until 6 hours
    • Then every 1 hour until 24 hours
    • Then every 3 hours
  • Success: when new valid data appears, the bot updates the message once and idles until the next weekly reset.
  • Errors / no update yet: the message shows a generic notice that it’s still waiting or that an issue occurred (no external source is mentioned).

Actions
  • stop — Pauses all checks. The message remains as-is until you resume.
  • resume — Returns to the normal weekly cycle (detect reset, then poll until new data).
  • start — Pretends the weekly reset just happened and begins polling now. Owner-only.

Channel & config
  • Target channel: taken from SHAI_DD_CHANNEL_ID (env). If unset, defaults to 1404764793377652807.
  • Scope: guild-specific. The message lives in this server’s configured channel.

Notes
  • This command only controls the updater; it does not manually edit the posted message.
  • On each successful weekly update, the bot compares content against the previous week to avoid duplicate posts.
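
The reset anchor (Tuesday 03:00 UTC) and the back-off schedule above are implemented by two small helpers in modules/dd/dd_loot_table.py (_this_week_anchor and _backoff_delay_secs); a condensed standalone sketch of the same logic:

from datetime import datetime, timedelta, timezone
from typing import Optional

def this_week_anchor(now: Optional[datetime] = None) -> datetime:
    """Most recent weekly reset: Tuesday 03:00 UTC (mirrors _this_week_anchor)."""
    if now is None:
        now = datetime.now(timezone.utc)
    delta_days = (now.weekday() - 1) % 7          # weekday(): Monday=0, so Tuesday=1
    d = (now - timedelta(days=delta_days)).date()
    anchor = datetime(d.year, d.month, d.day, 3, 0, 0, tzinfo=timezone.utc)
    return anchor - timedelta(days=7) if now < anchor else anchor

def backoff_delay_secs(waiting_since: float, now_ts: float) -> int:
    """Seconds until the next poll, given how long we have waited (mirrors _backoff_delay_secs)."""
    waited = max(0.0, now_ts - waiting_since)
    if waited < 3600:       return 5 * 60    # every 5 min for the first hour
    if waited < 3 * 3600:   return 15 * 60   # then every 15 min until 3 hours
    if waited < 6 * 3600:   return 30 * 60   # then every 30 min until 6 hours
    if waited < 24 * 3600:  return 60 * 60   # then hourly until 24 hours
    return 3 * 3600                          # then every 3 hours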
diff --git a/assets/docs/commands/__commands__.json b/assets/docs/commands/__commands__.json index 2dc3c08..3984bae 100644 --- a/assets/docs/commands/__commands__.json +++ b/assets/docs/commands/__commands__.json @@ -87,5 +87,9 @@ "UserCardsCog.usercards_rescan": { "brief_html": "

/usercards_rescan

Re-check everyone and refresh the user cards. Also repairs Roles/RoE/nickname claims from the live reaction messages, and re-opens any missing nickname reviews.

Usage
/usercards_rescan
  • Moderator-only (requires Manage Server).
  • Runs in the server; reply is ephemeral with a short summary.

What it does
  • Reconciles from the configured Rules / RoE / Nickname reaction messages.
  • Grants/removes the Rules & RoE roles to match reactions.
  • For Nickname: opens a pending review if someone claimed but no review exists.
  • Rebuilds/updates every user’s status card in the list channel.
",
"details_html": "
/usercards_rescan — Reconcile & refresh all cards

One-shot maintenance pass that makes the server’s user cards match reality.

Access
  • Moderator-only — requires the Discord permission Manage Server.
  • Must be used in a server channel (not DMs). The result is sent ephemerally.

What it fixes
  1. Rules / RoE agreement
    • Adds/removes the corresponding roles so roles match the reactions.
  2. Nickname claim & reviews
    • If a member has an “accept” reaction on the Nickname message but has no pending/verified record and no open review, it opens a pending nickname review for them.
    • If a member removed their Nickname reaction, it clears any pending/verified state.
  3. User cards
    • Updates (or recreates) the embed for each member in the configured “users list” channel.
    • Card color reflects: Rules, RoE, and Nickname status (✅ verified / ✔️ pending / ❌ not done).
    • Uses a stable footer marker (UID:<id>) to find/edit the right card; cleans up duplicates.

Expected output
The command replies (ephemeral) with counts like:
Reconciled from messages. Changes — Rules: 3, RoE: 2, Nickname (added): 1, Nickname (removed): 0. Refreshed cards for 154 members.

Setup notes
  • Relies on your configured IDs (ENV/INI): Rules/RoE/Nickname message IDs and their role IDs, the Full Access role, the user-cards channel, and the mod/modlog channels.
  • Won’t ping anyone; all posts/edits are sent with no mentions.

Tips
  • Run after importing a server, restoring from backup, or after downtime.
  • Large servers: this may take a moment while it walks members and edits cards. It’s safe to run again.
\n" + }, + "DDLootTableCog.dd_update": { + "brief_html": "

/dd_update

\n

Control the Deep Desert weekly loot updater. Stop/resume the scheduler or force a one-off start.

\n\n

Usage

\n
/dd_update <action> [reason]
\n
    \n
  • action — one of stop, resume, start.
  • \n
  • reason (optional) — short note shown in the confirmation.
  • \n
\n\n

Permissions

\n
    \n
  • stop / resume: Server Owner or members with Manage Server.
  • \n
  • start: Server Owner only (dangerous; bypasses the usual wait).
  • \n
\n\n

What it does

\n
    \n
  • stop — pauses all checks until resume.
  • \n
  • resume — returns to the normal weekly cycle.
  • \n
  • start — behaves as if the weekly reset just happened and begins polling immediately.
  • \n
\n", + "details_html": "

/dd_update — Deep Desert updater controls

\n

Manage the weekly “Deep Desert — Weekly Uniques” message updater for this guild.

\n\n

Usage

\n
/dd_update <action> [reason]
\n
    \n
  • action — stop | resume | start
  • \n
  • reason (optional) — free text appended to the confirmation response.
  • \n
\n\n

Permissions

\n
    \n
  • stop / resume: Server Owner or members with Manage Server.
  • \n
  • start: Server Owner only. Use with care—this forces an immediate check cycle.
  • \n
\n\n

Behavior

\n
    \n
  • Weekly reset: Tuesdays 03:00 UTC. At reset, the bot updates the channel message to indicate it’s waiting for the new week.
  • \n
  • Polling cadence: adaptive back-off until fresh data is found:\n
      \n
    • Every 5 min for the first hour
    • \n
    • Then every 15 min until 3 hours
    • \n
    • Then every 30 min until 6 hours
    • \n
    • Then every 1 hour until 24 hours
    • \n
    • Then every 3 hours
    • \n
    \n
  • \n
  • Success: when new valid data appears, the bot updates the message once and idles until the next weekly reset.
  • \n
  • Errors / no update yet: the message shows a generic notice that it’s still waiting or that an issue occurred (no external source is mentioned).
  • \n
\n\n

Actions

\n
    \n
  • stop — Pauses all checks. The message remains as-is until you resume.
  • \n
  • resume — Returns to the normal weekly cycle (detect reset, then poll until new data).
  • \n
  • start — Pretends the weekly reset just happened and begins polling now. Owner-only.
  • \n
\n\n

Channel & config

\n
    \n
  • Target channel: taken from SHAI_DD_CHANNEL_ID (env). If unset, defaults to 1404764793377652807.
  • \n
  • Scope: guild-specific. The message lives in this server’s configured channel.
  • \n
\n\n

Notes

\n
    \n
  • This command only controls the updater; it does not manually edit the posted message.
  • \n
  • On each successful weekly update, the bot compares content against the previous week to avoid duplicate posts.
  • \n
\n" } } \ No newline at end of file diff --git a/bot.py b/bot.py index 7252d1e..cab6675 100644 --- a/bot.py +++ b/bot.py @@ -9,7 +9,7 @@ from modules.common.boot_notice import post_boot_notice # Version consists of: # Major.Enhancement.Minor.Patch.Test (Test is alphanumeric; doesn’t trigger auto update) -VERSION = "0.4.1.0.a8" +VERSION = "0.4.2.0.a1" # ---------- Env loading ---------- @@ -101,6 +101,7 @@ async def _guild_selfcheck(g: discord.Guild, c): _need_channel('mod_channel_id', 'read_messages', 'send_messages', 'add_reactions', 'read_message_history') _need_channel('modlog_channel_id', 'read_messages', 'send_messages') _need_channel('pirates_list_channel_id', 'read_messages', 'send_messages') + _need_channel('dd_channel_id', 'read_messages', 'send_messages', 'read_message_history') if problems: print(f"[SelfCheck:{g.name}]") diff --git a/modules/common/boot_notice.py b/modules/common/boot_notice.py index fb0f42a..29fa1af 100644 --- a/modules/common/boot_notice.py +++ b/modules/common/boot_notice.py @@ -181,6 +181,10 @@ async def post_boot_notice(bot): except Exception as e: print(f"[boot_notice] wait_until_ready failed: {e}") + + for guild in bot.guilds: + print(f' - {guild.name} (id: {guild.id})') + r = cfg(bot) modlog_channel_id = r.int('modlog_channel_id', 0) if not modlog_channel_id: diff --git a/modules/dd/__init__.py b/modules/dd/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/modules/dd/dd_loot_table.py b/modules/dd/dd_loot_table.py new file mode 100644 index 0000000..6217f47 --- /dev/null +++ b/modules/dd/dd_loot_table.py @@ -0,0 +1,781 @@ +# modules/dd/dd_loot_table.py +from __future__ import annotations + +import asyncio +import hashlib +import os +import re +import time +from dataclasses import dataclass +from datetime import datetime, timedelta, timezone +from typing import Any, Dict, List, Optional, Tuple, Literal + +import aiohttp +import discord +from discord.ext import commands +from discord import app_commands + +from modules.common.settings import cfg + +DD_FALLBACK_CHANNEL = 1404764793377652807 +DD_URL = "https://dune.gaming.tools/deep-desert" +OWNER_ID = 203190147582394369 # for error notices + +def _log(*a): print("[DD]", *a) +def _utcnow() -> datetime: return datetime.now(timezone.utc) + +def _this_week_anchor(now: Optional[datetime] = None) -> datetime: + if now is None: now = _utcnow() + target_wd = 1 # Tue + cur_wd = now.weekday() + delta_days = (cur_wd - target_wd) % 7 + anchor_date = (now - timedelta(days=delta_days)).date() + anchor_dt = datetime(anchor_date.year, anchor_date.month, anchor_date.day, 3, 0, 0, tzinfo=timezone.utc) + if now < anchor_dt: anchor_dt -= timedelta(days=7) + return anchor_dt + +def _next_week_anchor(after: Optional[datetime] = None) -> datetime: + return _this_week_anchor(after) + timedelta(days=7) + +def _backoff_delay_secs(waiting_since: float, now_ts: float) -> int: + waited = max(0.0, now_ts - waiting_since) + if waited < 3600: return 5 * 60 + if waited < 3 * 3600: return 15 * 60 + if waited < 6 * 3600: return 30 * 60 + if waited < 24 * 3600: return 60 * 60 + return 3 * 60 * 60 + +@dataclass +class DDState: + channel_id: int + message_id: Optional[int] + disabled: bool + + # hashes + last_hash: str # current cycle + prev_hash: str # previous cycle + last_post_hash: str # hash of the message content currently posted + + week_anchor_ts: int + last_success_ts: int + waiting_since_ts: int + last_attempt_ts: int + + @classmethod + def from_dm(cls, dm) -> "DDState": + rows = dm.get("dd_state") + row = rows[0] if 
rows else {} + env_raw = os.getenv("SHAI_DD_CHANNEL_ID", "").strip().strip('"').strip("'") + env_cid = int(env_raw) if env_raw.isdigit() else 0 + try: + stored_cid = int(row.get("channel_id") or 0) + except Exception: + stored_cid = 0 + chosen_cid = env_cid or stored_cid or DD_FALLBACK_CHANNEL + return cls( + channel_id=chosen_cid, + message_id=row.get("message_id"), + disabled=bool(row.get("disabled", False)), + last_hash=str(row.get("last_hash", "")), + prev_hash=str(row.get("prev_hash", "")), + last_post_hash=str(row.get("last_post_hash", "")), + week_anchor_ts=int(row.get("week_anchor_ts", 0)), + last_success_ts=int(row.get("last_success_ts", 0)), + waiting_since_ts=int(row.get("waiting_since_ts", 0)), + last_attempt_ts=int(row.get("last_attempt_ts", 0)), + ) + + def to_row(self) -> Dict[str, Any]: + return { + "channel_id": self.channel_id, + "message_id": self.message_id, + "disabled": self.disabled, + "last_hash": self.last_hash, + "prev_hash": self.prev_hash, + "last_post_hash": self.last_post_hash, + "week_anchor_ts": self.week_anchor_ts, + "last_success_ts": self.last_success_ts, + "waiting_since_ts": self.waiting_since_ts, + "last_attempt_ts": self.last_attempt_ts, + } + +# ---------- parsing ---------- + +_USER_AGENT = ( + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 " + "(KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36" +) + +DETAILS_BLOCK_RE = re.compile(r"]*>.*?", re.I | re.S) +NAME_SPAN_RE = re.compile(r"]*>.*?]*>(?P[^<]+).*?", re.I | re.S) +ROW_RE = re.compile( + r']*class="[^"]*flex[^"]*items-center[^"]*gap-2[^"]*"[^>]*>\s*' + r']*class="[^"]*w-8[^"]*text-center[^"]*"[^>]*>\s*(?P[A-Z]\d+)\s*
\s*' + r']*>\s*(?P[^<]+?)\s*
.*?' + r']*class="[^"]*ml-auto[^"]*"[^>]*>.*?' + r']*class="[^"]*w-10[^"]*text-center[^"]*"[^>]*>\s*(?P[^<]+?)\s*
\s*' + r']*>\s*(?P~?\d+%|\d+\.\d+%)\s*
.*?' + r'\s*', + re.I | re.S, +) + +def _parse_dd_html(html: str) -> List[Dict[str, str]]: + results: List[Dict[str, str]] = [] + for dmatch in DETAILS_BLOCK_RE.finditer(html or ""): + block = dmatch.group(0) + nmatch = NAME_SPAN_RE.search(block) + if not nmatch: continue + name = " ".join(nmatch.group("name").split()) + for rmatch in ROW_RE.finditer(block): + grid = " ".join(rmatch.group("grid").split()) + loc = " ".join(rmatch.group("loc").split()) + amt = " ".join(rmatch.group("amt").split()) + chance = " ".join(rmatch.group("chance").split()) + results.append({"name": name, "grid": grid, "loc": loc, "amount": amt, "chance": chance}) + return results + +def _hash_text(s: str) -> str: + return hashlib.sha1(s.encode("utf-8")).hexdigest() + +def _hash_records(rows) -> str: + rows = _sanitize_rows(rows) + m = hashlib.sha256() + for r in rows: + m.update(f"{r['name']}|{r['grid']}|{r['loc']}|{r['amount']}|{r['chance']}\n".encode("utf-8")) + return m.hexdigest() + +# ---------- formatters ---------- + +def _as_str(v) -> str: + """Coerce any value (incl. lists/tuples) to a compact string.""" + if isinstance(v, str): + return v + if isinstance(v, (list, tuple, set)): + try: + return ", ".join(map(str, v)) + except Exception: + return str(v) + return str(v) + +def _sanitize_rows(rows): + """Return rows with all fields as trimmed strings; safe for hashing/formatting.""" + out = [] + for r in rows or []: + out.append({ + "name": _as_str(r.get("name", "")).strip(), + "grid": _as_str(r.get("grid", "")).strip().upper(), + "loc": _as_str(r.get("loc", "")).strip(), + "amount": _as_str(r.get("amount", "")).strip().replace("–", "-"), + "chance": _as_str(r.get("chance", "")).strip().replace(" ", ""), + }) + return out + +def _abbr_loc(loc: str) -> str: + """Shorten common locations to save characters.""" + m = { + "Imperial Testing Station": "Imp. Testing Station", + "Large Shipwreck": "L. Shipwreck", + "Small Shipwreck": "S. Shipwreck", + } + return m.get(loc.strip(), loc.strip()) + +def _grid_sort_key(g: str): + """Sort grids like A1, A2, B10 naturally.""" + g = g.strip().upper() + if not g: + return ("Z", 999) + letter, num = g[0], g[1:] + try: + n = int(num) + except Exception: + n = 999 + return (letter, n) + +def _fit_discord_message(lines: list[str], header: str, budget: int = 1900) -> str: + """Join lines under budget with a truncation notice if needed.""" + out = [header] + total = len(header) + 1 + dropped = 0 + for ln in lines: + ln_len = len(ln) + 1 + if total + ln_len > budget: + dropped += 1 + continue + out.append(ln) + total += ln_len + if dropped: + out.append(f"... _(truncated {dropped} lines)_") + return "\n".join(out) + +def _fmt_waiting(anchor_dt: datetime) -> str: + when = anchor_dt.strftime("%Y-%m-%d %H:%M UTC") + return ("**Deep Desert — Weekly Uniques**\n" + f"_Reset detected (week starting **{when}**)._\n" + "Waiting for the new loot table to appear...\n" + "This message will update automatically once the new data is available.") + +def _fmt_error(anchor_dt: datetime, note: str) -> str: + when = anchor_dt.strftime("%Y-%m-%d %H:%M UTC") + return ("**Deep Desert — Weekly Uniques**\n" + f"_Week starting **{when}**._\n" + f"⚠️ {note}\n" + f"<@{OWNER_ID}> will investigate.") + +def _fmt_rows(rows, anchor_dt: datetime) -> str: + from collections import OrderedDict + + rows = _sanitize_rows(rows) + + def _abbr_loc(loc: str) -> str: + m = { + "Imperial Testing Station": "Imp. Testing Station", + "Large Shipwreck": "L. Shipwreck", + "Small Shipwreck": "S. 
Shipwreck", + } + return m.get(loc, loc) + + def _grid_sort_key(g: str): + g = (g or "").upper() + if not g: return ("Z", 999) + letter, num = g[0], g[1:] + try: n = int(num) + except: n = 999 + return (letter, n) + + # item -> location -> (amount, chance) -> [grids] + grouped: "OrderedDict[str, OrderedDict[str, Dict[Tuple[str, str], List[str]]]]" = OrderedDict() + for r in sorted(rows, key=lambda x: (x["name"], _abbr_loc(x["loc"]), _grid_sort_key(x["grid"]))): + item, loc, grid, amt, ch = r["name"], _abbr_loc(r["loc"]), r["grid"], r["amount"], r["chance"] + grouped.setdefault(item, OrderedDict()).setdefault(loc, {}).setdefault((amt, ch), []).append(grid) + + lines = [] + for item, loc_map in grouped.items(): + lines.append(f"- **{item}**") + for loc, by_ac in loc_map.items(): + lines.append(f" - {loc}") + + def _sort_ac(k): + amt, ch = k + try: + chv = float(ch.lstrip("~").rstrip("%")) + except Exception: + chv = -1.0 + return (-chv, amt) + + for (amt, ch), grids in sorted(by_ac.items(), key=_sort_ac): + gstr = ", ".join(sorted(set(grids), key=_grid_sort_key)) + lines.append(f" - {gstr} - {amt} ({ch})") + + when = anchor_dt.strftime("%Y-%m-%d %H:%M UTC") + header = f"**Deep Desert — Weekly Uniques** _(week starting **{when}**)_" + return _fit_discord_message(lines, header, budget=1900) + +# ---------- HTTP fetchers ---------- + +async def _fetch_via_aiohttp(session: aiohttp.ClientSession, url: str) -> str: + headers = { + "User-Agent": _USER_AGENT, + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", + "Accept-Language": "en-US,en;q=0.9", + "Cache-Control": "no-cache", + "Pragma": "no-cache", + } + timeout = aiohttp.ClientTimeout(total=20, sock_connect=10, sock_read=10) + async with session.get(url, headers=headers, allow_redirects=True, timeout=timeout) as resp: + text = await resp.text() + if resp.status >= 400: + raise aiohttp.ClientResponseError( + request_info=resp.request_info, history=resp.history, + status=resp.status, message=f"HTTP {resp.status}", headers=resp.headers + ) + return text + +# ---------- Playwright (headless) ---------- + +class _PlaywrightPool: + """Lazy, optional Playwright Chromium pool (single context).""" + def __init__(self): + self.apw = None + self.browser = None + self.context = None + self.enabled = False + + async def ensure(self) -> bool: + if self.enabled and self.apw and self.browser and self.context: + return True + try: + from playwright.async_api import async_playwright # type: ignore + except Exception: + return False + + self.apw = await async_playwright().start() + # flags for container/root environments + reduce automation signals + self.browser = await self.apw.chromium.launch( + headless=True, + args=[ + "--no-sandbox", + "--disable-dev-shm-usage", + "--disable-gpu", + "--disable-blink-features=AutomationControlled", + ], + ) + self.context = await self.browser.new_context( + user_agent=_USER_AGENT, + locale="en-US", + timezone_id="UTC", + java_script_enabled=True, + ignore_https_errors=True, + viewport={"width": 1366, "height": 900}, + extra_http_headers={ + "Accept-Language": "en-US,en;q=0.9", + "Upgrade-Insecure-Requests": "1", + }, + ) + # Minimal stealth: remove webdriver and add a few common props + await self.context.add_init_script(""" + Object.defineProperty(navigator, 'webdriver', { get: () => undefined }); + Object.defineProperty(navigator, 'languages', { get: () => ['en-US', 'en'] }); + Object.defineProperty(navigator, 'platform', { get: () => 'Win32' }); + Object.defineProperty(navigator, 'plugins', { 
get: () => [1,2,3,4,5] }); + """) + self.enabled = True + return True + + async def close(self): + try: + if self.context: await self.context.close() + finally: + try: + if self.browser: await self.browser.close() + finally: + try: + if self.apw: await self.apw.stop() + finally: + self.apw = self.browser = self.context = None + self.enabled = False + + async def fetch(self, url: str, timeout_ms: Optional[int] = None, wait: Optional[str] = None) -> str: + """ + Fetch fully rendered HTML with tolerant waiting against Cloudflare. + Env overrides: + SHAI_DD_PW_TIMEOUT_MS (default 45000) + SHAI_DD_PW_WAIT = domcontentloaded|load|networkidle (default domcontentloaded) + """ + if not await self.ensure(): + raise RuntimeError("playwright-unavailable") + + timeout_ms = int(os.getenv("SHAI_DD_PW_TIMEOUT_MS", "45000") or "45000") if timeout_ms is None else timeout_ms + wait_mode = (os.getenv("SHAI_DD_PW_WAIT", "domcontentloaded") or "domcontentloaded").lower() + if wait: wait_mode = wait + + page = await self.context.new_page() + + # Keep media traffic low but don't block fonts/CSS/JS (CF sometimes needs them) + async def _route(route): + rt = route.request.resource_type + if rt in ("media", "video", "audio"): + await route.abort() + else: + await route.continue_() + await page.route("**/*", _route) + + # Step 1: navigate, but don't require networkidle (CF pages rarely go "idle") + await page.goto(url, wait_until=wait_mode, timeout=timeout_ms) + + # Step 2: loop for CF auto-redirect and app hydration + # We'll try up to ~35s total here. + end_by = time.time() + max(20, timeout_ms / 1000 - 5) + last_details = 0 + while time.time() < end_by: + html = await page.content() + u = page.url + # If we're still on a CF challenge or "just a moment" page, give it a bit + if ("cdn-cgi/challenge" in u) or ("cf-chl" in u) or ("Just a moment" in html) or ("Please wait" in html): + await page.wait_for_timeout(2500) + continue + + # Check if our target content looks present + try: + count = await page.locator("details").count() + except Exception: + count = 0 + last_details = max(last_details, count) + if count > 0: + break + + await page.wait_for_timeout(1500) + + html = await page.content() + await page.close() + return html + +# ---------- Cog ---------- + +class DDLootTableCog(commands.Cog): + def __init__(self, bot: commands.Bot): + self.bot = bot + r = cfg(bot) + self.dd_url = r.get("dd_url", DD_URL) + try: + self.channel_id_default = int(r.get("dd_channel_id", DD_FALLBACK_CHANNEL)) + except Exception: + self.channel_id_default = DD_FALLBACK_CHANNEL + + self._task: Optional[asyncio.Task] = None + self._session: Optional[aiohttp.ClientSession] = None + self._pw = _PlaywrightPool() + self._last_debug: str = "" + + async def cog_load(self): + self._session = aiohttp.ClientSession() + if self._task is None: + self._task = asyncio.create_task(self._runner(), name="DDLootTableRunner") + _log("cog loaded; runner started:", bool(self._task), "url:", self.dd_url) + + async def cog_unload(self): + t, self._task = self._task, None + if t: t.cancel() + s, self._session = self._session, None + if s: await s.close() + try: + await self._pw.close() + except Exception: + pass + _log("cog unloaded; runner/task closed") + + # ---- state ---- + + def _load_state(self) -> DDState: + st = DDState.from_dm(self.bot.data_manager) + env_raw = os.getenv("SHAI_DD_CHANNEL_ID", "").strip().strip('"').strip("'") + env_cid = int(env_raw) if env_raw.isdigit() else 0 + if env_cid and env_cid != st.channel_id: + st.channel_id = env_cid + 
self._save_state(st.to_row()) + _log(f"channel id overridden by ENV -> {env_cid}") + _log(f"state loaded: ch={st.channel_id} msg={st.message_id} disabled={st.disabled}") + return st + + def _save_state(self, patch: Dict[str, Any]) -> None: + dm = self.bot.data_manager + rows = dm.get("dd_state") + if not rows: + dm.add("dd_state", patch); return + def pred(_): return True + def upd(d): d.update(patch); return d + dm.update("dd_state", pred, upd) + + # ---- message helpers ---- + + async def _resolve_channel(self, channel_id: int) -> Optional[discord.TextChannel]: + ch = self.bot.get_channel(channel_id) + if ch is None: + try: ch = await self.bot.fetch_channel(channel_id) + except Exception: ch = None + if not isinstance(ch, discord.TextChannel): return None + me = ch.guild.me + if me: + p = ch.permissions_for(me) + if not (p.read_messages and p.send_messages): + _log(f"missing perms in #{ch.name} ({ch.id})") + return ch + + async def _ensure_message(self, st: DDState, content_if_create: Optional[str]) -> Optional[discord.Message]: + ch = await self._resolve_channel(st.channel_id) + if not ch: + _log("target channel not found/invalid:", st.channel_id) + return None + + if st.message_id: + try: + return await ch.fetch_message(st.message_id) + except discord.NotFound: + st.message_id = None + self._save_state({"message_id": None}) + except discord.Forbidden: + _log("cannot fetch message (no history); will NOT create a new one") + return None + except Exception as e: + _log("fetch_message failed:", repr(e)) + return None + + if content_if_create is None: + return None + try: + msg = await ch.send(content_if_create) + st.message_id = msg.id + st.last_post_hash = _hash_text(content_if_create) + self._save_state({"channel_id": st.channel_id, "message_id": msg.id, "last_post_hash": st.last_post_hash}) + return msg + except Exception as e: + _log("failed to create message:", repr(e)) + return None + + async def _set_message(self, st: DDState, content: str) -> Optional[int]: + """Create-or-edit the single managed message. Returns message_id (if known) and stores last_post_hash.""" + msg = await self._ensure_message(st, content_if_create=content if not st.message_id else None) + if not msg: + return None + try: + await msg.edit(content=content) + st.last_post_hash = _hash_text(content) + self._save_state({"last_post_hash": st.last_post_hash}) + except discord.NotFound: + st.message_id = None + self._save_state({"message_id": None}) + msg2 = await self._ensure_message(st, content_if_create=content) + if msg2: + try: + await msg2.edit(content=content) + st.last_post_hash = _hash_text(content) + self._save_state({"message_id": msg2.id, "last_post_hash": st.last_post_hash}) + except Exception: + pass + except discord.Forbidden: + _log("edit forbidden; single-message mode keeps state") + except Exception as e: + _log("edit failed:", repr(e)) + return st.message_id + + # ---- fetch orchestration ---- + + async def _fetch_dd_html_any(self) -> Tuple[str, str]: + """Return (html, backend_tag). 
Preference: env → playwright(if available) → aiohttp.""" + prefer = os.getenv("SHAI_DD_FETCHER", "").lower() + # prefer Playwright + if prefer in {"playwright", "pw", "browser"}: + if await self._pw.ensure(): + html = await self._pw.fetch(self.dd_url) + return html, "playwright" + else: + # opportunistic: try Playwright first if available + try: + if await self._pw.ensure(): + html = await self._pw.fetch(self.dd_url) + return html, "playwright" + except Exception: + pass + # fallback: aiohttp (may 403) + html = await _fetch_via_aiohttp(self._session, self.dd_url) + return html, "aiohttp" + + async def _attempt_fetch(self) -> Tuple[bool, List[Dict[str, str]], str]: + import asyncio + self._last_debug = "" + if not self._session: + self._last_debug = "internal: no HTTP session" + return (False, [], "unable to check for new loot (will retry)") + try: + html, backend = await self._fetch_dd_html_any() + self._last_debug = f"ok via {backend}" + except aiohttp.ClientResponseError as e: + self._last_debug = f"http {getattr(e,'status','?')} (aiohttp)" + return (False, [], "unable to check for new loot (will retry)") + except asyncio.TimeoutError: + self._last_debug = "timeout" + return (False, [], "unable to check for new loot (will retry)") + except Exception as e: + self._last_debug = f"{e.__class__.__name__}: {e}" + return (False, [], "unable to check for new loot (will retry)") + + try: + rows = _parse_dd_html(html) + if not rows: + self._last_debug = "parse: zero rows" + return (False, [], "no loot entries detected yet (will retry)") + clean = [] + for r in rows: + name = r["name"].strip() + grid = r["grid"].strip().upper() + loc = r["loc"].strip() + amt = r["amount"].strip().replace("–", "-") + chance = r["chance"].strip().replace(" ", "") + if not name or not re.match(r"^[A-Z]\d+$", grid): + continue + clean.append({"name": name, "grid": grid, "loc": loc, "amount": amt, "chance": chance}) + if not clean: + self._last_debug = "parse: filtered to zero rows" + return (False, [], "loot data format changed (will retry)") + return (True, clean, "") + except Exception as e: + self._last_debug = f"parse error: {e.__class__.__name__}: {e}" + return (False, [], "loot data parse error (will retry)") + + # ---- manual kick ---- + + async def _manual_kick_once(self, st: DDState) -> str: + anchor_dt = _this_week_anchor() + + # always show "waiting" briefly so users see it's been kicked + mid = await self._set_message(st, _fmt_waiting(anchor_dt)) + if mid and not st.message_id: + st.message_id = mid + self._save_state(st.to_row()) + + ok, rows, note = await self._attempt_fetch() + if not ok or not rows: + if note: + await self._set_message(st, _fmt_error(anchor_dt, note)) + return f"Fetch failed: {note or 'unknown error'}" + + new_hash = _hash_records(rows) + + if st.prev_hash and new_hash == st.prev_hash: + # still last week's data; keep waiting + await self._set_message(st, _fmt_waiting(anchor_dt)) + return "Data unchanged from previous cycle; still waiting." + + table = _fmt_rows(rows, anchor_dt) + + if st.last_hash and new_hash == st.last_hash: + # same as what we already posted this cycle → ensure table is visible + await self._set_message(st, table) + return "Data unchanged; table ensured." + + # fresh for this cycle + st.last_hash = new_hash + st.last_success_ts = int(time.time()) + self._save_state(st.to_row()) + await self._set_message(st, table) + return "Posted fresh data." 
+ + # ---- runner ---- + + async def _runner(self): + await self.bot.wait_until_ready() + _log("runner loop started") + while not self.bot.is_closed(): + try: + st = self._load_state() + if st.disabled: + await asyncio.sleep(300); continue + + now_dt = _utcnow() + this_anchor_dt = _this_week_anchor(now_dt) + this_anchor_ts = int(this_anchor_dt.timestamp()) + next_anchor_dt = _next_week_anchor(now_dt) + + if st.week_anchor_ts != this_anchor_ts: + # roll current → prev; reset current + st.prev_hash = st.last_hash or st.prev_hash + st.last_hash = "" + st.week_anchor_ts = this_anchor_ts + st.last_success_ts = 0 + st.waiting_since_ts = this_anchor_ts + st.last_attempt_ts = 0 + self._save_state(st.to_row()) + + mid = await self._set_message(st, _fmt_waiting(this_anchor_dt)) + if mid and not st.message_id: + st.message_id = mid + self._save_state(st.to_row()) + _log("new week anchor -> waiting UPDATED (single-message)") + + if st.last_success_ts >= this_anchor_ts and st.last_success_ts < int(next_anchor_dt.timestamp()): + await asyncio.sleep(min(3600, max(60, int(next_anchor_dt.timestamp() - time.time())))) + continue + + if st.waiting_since_ts == 0: + st.waiting_since_ts = this_anchor_ts + + delay = _backoff_delay_secs(st.waiting_since_ts, time.time()) + + if st.last_attempt_ts == 0 or (time.time() - st.last_attempt_ts) >= delay: + ok, rows, note = await self._attempt_fetch() + st.last_attempt_ts = int(time.time()) + self._save_state(st.to_row()) + + if ok and rows: + new_hash = _hash_records(rows) + + # 1) identical to last cycle → keep waiting; keep polling + if st.prev_hash and new_hash == st.prev_hash: + waiting = _fmt_waiting(this_anchor_dt) + if st.last_post_hash != _hash_text(waiting): + await self._set_message(st, waiting) + _log("data equals prev week; still waiting") + # no success_ts update; try again with backoff + else: + table = _fmt_rows(rows, this_anchor_dt) + + # 2) same as current hash → ensure table is visible (flip off any waiting message) + if st.last_hash and new_hash == st.last_hash: + if st.last_post_hash != _hash_text(table): + await self._set_message(st, table) + _log("data same as already posted; ensured table visible") + # already have success this cycle; sleep a bit longer + await asyncio.sleep(900) + continue + + # 3) fresh data for this cycle → post table, mark success + st.last_hash = new_hash + st.last_success_ts = int(time.time()) + self._save_state(st.to_row()) + await self._set_message(st, table) + _log("updated weekly uniques (fresh data)") + await asyncio.sleep(900) + continue + + else: + if note: + await self._set_message(st, _fmt_error(this_anchor_dt, note)) + _log("fetch failed:", note, "| debug:", self._last_debug) + + await asyncio.sleep(30) + + except asyncio.CancelledError: + break + except Exception as e: + _log("runner error:", repr(e)); await asyncio.sleep(30) + _log("runner loop stopped") + + # ---- command ---- + + @app_commands.command(name="dd_update", description="Control the Deep Desert weekly loot updater") + @app_commands.describe(action="stop/resume/start", reason="Optional reason") + async def dd_update(self, interaction: discord.Interaction, + action: Literal["stop", "resume", "start"], + reason: Optional[str] = None): + st = self._load_state() + is_owner = bool(interaction.guild and interaction.user.id == getattr(interaction.guild, "owner_id", 0)) + if action == "start": + perms_ok = is_owner + else: + perms = interaction.user.guild_permissions if interaction.guild else None + perms_ok = bool(is_owner or (perms and perms.manage_guild)) 
+ if not perms_ok: + return await interaction.response.send_message("You don't have permission to do that.", ephemeral=True) + + if action == "stop": + st.disabled = True; self._save_state(st.to_row()) + msg = "DD updater stopped."; + if reason: msg += f" Reason: {reason}" + return await interaction.response.send_message(msg, ephemeral=True) + + if action == "resume": + st.disabled = False; self._save_state(st.to_row()) + return await interaction.response.send_message("DD updater resumed.", ephemeral=True) + + # start (owner-only) + st.disabled = False + now_dt = _utcnow() + st.week_anchor_ts = int(_this_week_anchor(now_dt).timestamp()) + st.waiting_since_ts = int(time.time()) + st.last_attempt_ts = 0 + self._save_state(st.to_row()) + + ch = await self._resolve_channel(st.channel_id) + if not ch: + return await interaction.response.send_message( + f"Manual start queued, but the target channel is invalid or missing.\n" + f"Set **SHAI_DD_CHANNEL_ID** to a valid text channel ID (current: `{st.channel_id}`).", + ephemeral=True + ) + + await interaction.response.defer(ephemeral=True) + status = await self._manual_kick_once(st) + dbg = f" (debug: {self._last_debug})" if self._last_debug else "" + await interaction.followup.send(f"Manual start triggered. {status}{dbg}", ephemeral=True) + +async def setup(bot: commands.Bot): + await bot.add_cog(DDLootTableCog(bot)) diff --git a/modules/docs_site/docs_site.py b/modules/docs_site/docs_site.py index 8fe6379..3c83f59 100644 --- a/modules/docs_site/docs_site.py +++ b/modules/docs_site/docs_site.py @@ -290,8 +290,8 @@ def _gather_prefix_and_hybrid(bot: commands.Bot) -> List[Dict[str, Any]]: "cog": getattr(cmd.cog, "qualified_name", None) if getattr(cmd, "cog", None) else None, "module": getattr(getattr(cmd, "callback", None), "__module__", None), "moderator_only": bool(is_mod), + "admin_only": False, "required_permissions": perms, - # NEW: counter fields "counter_key": qn, "exec_count": _cmd_counter(bot, qn), } @@ -364,10 +364,10 @@ def _gather_slash(bot: commands.Bot) -> List[Dict[str, Any]]: "cog": binding.__class__.__name__ if binding else None, "module": getattr(callback, "__module__", None) if callback else None, "moderator_only": bool(is_mod), + "admin_only": False, "required_permissions": perms, "extras": _safe_extras(leaf), "dm_permission": getattr(leaf, "dm_permission", None), - # NEW: counter fields "counter_key": qn, "exec_count": _cmd_counter(bot, qn), } @@ -565,6 +565,8 @@ def _merge_hybrid_slash(rows: List[Dict[str, Any]]) -> None: h["help"] = r["help"] if r.get("moderator_only"): h["moderator_only"] = True + if r.get("admin_only"): + h["admin_only"] = True if r.get("required_permissions"): h["required_permissions"] = sorted(set((h.get("required_permissions") or []) + r["required_permissions"])) if not h.get("extras") and r.get("extras"): @@ -598,8 +600,11 @@ def build_command_schema(bot: commands.Bot) -> Dict[str, Any]: for row in all_rows: try: helptext = f"{row.get('help') or ''} {row.get('brief') or ''}" - if "[mod]" in helptext.lower(): + hl = helptext.lower() + if "[mod]" in hl: row["moderator_only"] = True + if "[admin]" in hl: + row["admin_only"] = True except Exception: pass if row.get("required_permissions"): @@ -608,6 +613,8 @@ def build_command_schema(bot: commands.Bot) -> Dict[str, Any]: ex = row.get("extras") or {} if isinstance(ex, dict) and str(ex.get("category", "")).lower() in {"mod", "moderator", "staff"}: row["moderator_only"] = True + if isinstance(ex, dict) and str(ex.get("category", "")).lower() in {"admin", 
"administrator", "owner"}: + row["admin_only"] = True except Exception: pass @@ -622,8 +629,8 @@ def build_command_schema(bot: commands.Bot) -> Dict[str, Any]: all_rows.sort(key=_sort_key) - mods = [r for r in all_rows if r.get("moderator_only")] - users = [r for r in all_rows if not r.get("moderator_only")] + mods = [r for r in all_rows if r.get("moderator_only") or r.get("admin_only")] + users = [r for r in all_rows if not (r.get("moderator_only") or r.get("admin_only"))] return { "title": "ShaiWatcher Commands",