commit 55e8693a3f
parent 0ceebb5a57

animecli.py | 84
@@ -10,13 +10,11 @@ import concurrent.futures
 import configparser
 import os

 # Default paths
 DB_DIR = Path.home() / ".animecli"
 DB_PATH = DB_DIR / "animes.db"
 DOWNLOAD_DIR = Path.home() / "MesAnimes"
 CONFIG_PATH = DB_DIR / "config.ini"

 # Default values
 DEFAULT_CONFIG = {
     "vitesse_max": "0",
     "telechargements_simultanes": "1",
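For reference, a minimal sketch of how these defaults would typically be merged with the file at CONFIG_PATH via configparser; the section name "animecli" and the loader itself are assumptions, not part of this diff:

import configparser
from pathlib import Path

DB_DIR = Path.home() / ".animecli"
CONFIG_PATH = DB_DIR / "config.ini"
DEFAULT_CONFIG = {
    "vitesse_max": "0",
    "telechargements_simultanes": "1",
}

parser = configparser.ConfigParser()
parser.read(CONFIG_PATH)  # a missing file is silently skipped
section = parser["animecli"] if parser.has_section("animecli") else {}
# Fall back to the defaults for any missing key
CONFIG = {key: section.get(key, default) for key, default in DEFAULT_CONFIG.items()}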
@@ -85,7 +83,23 @@ def get_all_animes(conn):
     c = conn.cursor()
     return c.execute("SELECT id, titre, url, saison, dernier_episode FROM animes").fetchall()

+def extract_master_m3u8_url(page_url):
+    try:
+        resp = requests.get(page_url)
+        resp.raise_for_status()
+        urls = re.findall(r'https?://[^\s\'"]+\.m3u8[^\s\'"]*', resp.text)
+        for url in urls:
+            if "master.m3u8" in url:
+                return url
+        return urls[0] if urls else None
+    except Exception:
+        return None
+
 def get_source_info(url):
+    if url.endswith('.html'):
+        m3u8_url = extract_master_m3u8_url(url)
+        if m3u8_url:
+            url = m3u8_url
     ydl_opts = {
         'quiet': True,
         'skip_download': True,
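The new extract_master_m3u8_url helper simply regex-scans the embed page for .m3u8 URLs and prefers the master playlist. A standalone sketch of that selection logic against a made-up HTML snippet (the URLs are hypothetical):

import re

sample_html = '''
<script>
  var sources = ["https://cdn.example.com/v/123/master.m3u8?token=abc",
                 "https://cdn.example.com/v/123/index-720p.m3u8"];
</script>
'''
urls = re.findall(r'https?://[^\s\'"]+\.m3u8[^\s\'"]*', sample_html)
# Prefer the master playlist; otherwise take the first match, as the helper does
master = next((u for u in urls if "master.m3u8" in u), urls[0] if urls else None)
print(master)  # https://cdn.example.com/v/123/master.m3u8?token=abc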
@@ -98,25 +112,41 @@ def get_source_info(url):
         if not formats:
             return None
         best = max(formats, key=lambda f: f.get('height', 0) or 0)
         taille = best.get('filesize') or best.get('filesize_approx')
         taille_mo = f"{taille // (1024*1024)} Mo" if taille else "?"
         qualite = f"{best.get('height', '?')}p"
         return {
             'qualite': qualite,
             'taille': taille_mo,
             'title': info.get('title', ''),
             'url': url
         }
     except Exception:
         return None

-def choisir_source(stdscr, sources_infos):
+def choisir_source_globale(stdscr, episode_data):
+    # Group the sources by index (site)
+    sources_by_site = []
+    for i in range(len(episode_data[0][0])):  # number of sources per episode
+        urls = []
+        for ep in episode_data:
+            if len(ep[0]) > i:
+                urls.append(ep[0][i])
+        if urls:
+            sources_by_site.append(urls)
+    # Use the first URL of each site for the quality preview
+    preview_urls = [urls[0] for urls in sources_by_site]
+    infos = [None] * len(preview_urls)
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        future_to_idx = {executor.submit(get_source_info, url): i for i, url in enumerate(preview_urls)}
+        done = set()
     sel = 0
     while True:
         stdscr.clear()
-        stdscr.addstr(0, 0, "Choisissez la source pour cet épisode :")
-        for idx, info in enumerate(sources_infos):
-            line = f"{idx+1}. {info['url']} ({info['qualite']}, {info['taille']})"
+        stdscr.addstr(0, 0, "Choisissez la source à utiliser pour tous les épisodes :")
+        for idx, url in enumerate(preview_urls):
+            info = infos[idx]
+            domain = re.sub(r'^https?://(www\.)?', '', url).split('/')[0]
+            if info:
+                line = f"{idx+1}. {domain} ({info['qualite']})"
+            else:
+                line = f"{idx+1}. {domain} (chargement...)"
             if idx == sel:
                 stdscr.attron(curses.color_pair(1))
                 safe_addstr(stdscr, 2 + idx, 2, line)
@@ -124,13 +154,21 @@ def choisir_source(stdscr, sources_infos):
             else:
                 safe_addstr(stdscr, 2 + idx, 2, line)
         stdscr.refresh()
+        try:
+            for future in concurrent.futures.as_completed(future_to_idx, timeout=0.1):
+                idx = future_to_idx[future]
+                if idx not in done:
+                    infos[idx] = future.result()
+                    done.add(idx)
+        except concurrent.futures.TimeoutError:
+            pass
         k = stdscr.getch()
         if k == curses.KEY_UP and sel > 0:
             sel -= 1
-        elif k == curses.KEY_DOWN and sel < len(sources_infos) - 1:
+        elif k == curses.KEY_DOWN and sel < len(preview_urls) - 1:
             sel += 1
         elif k in [curses.KEY_ENTER, 10, 13]:
-            return sel
+            return sel, sources_by_site[sel]

 def extract_episode_sources(url_page):
     resp = requests.get(url_page)
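The try/except around as_completed is the interesting part: it harvests whichever quality probes have finished within 0.1 s and lets the menu keep redrawing instead of blocking on all of them. A self-contained sketch of the same pattern with a dummy task:

import concurrent.futures
import time

def slow_probe(i):
    time.sleep(i * 0.2)  # stand-in for get_source_info
    return f"info-{i}"

with concurrent.futures.ThreadPoolExecutor() as executor:
    future_to_idx = {executor.submit(slow_probe, i): i for i in range(4)}
    infos = [None] * 4
    done = set()
    while len(done) < 4:
        # ... redraw the menu here ...
        try:
            for future in concurrent.futures.as_completed(future_to_idx, timeout=0.1):
                idx = future_to_idx[future]
                if idx not in done:
                    infos[idx] = future.result()
                    done.add(idx)
        except concurrent.futures.TimeoutError:
            pass  # not everything is ready yet; poll again next iteration
    print(infos)

One caveat in the real loop: stdscr.getch() blocks by default, so the "(chargement...)" entries only refresh after a keypress; calling stdscr.timeout(200) before the loop would make the menu repaint on its own.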
@@ -170,6 +208,10 @@ def format_dernier(dernier_url, saison):
     return f"Saison {saison}, épisode {ep}, {dernier_url}"

 def telecharger_episode(url, saison_folder, filename, qualite):
+    if url.endswith('.html'):
+        m3u8_url = extract_master_m3u8_url(url)
+        if m3u8_url:
+            url = m3u8_url
     ydl_opts = {
         "outtmpl": str(saison_folder / filename),
         "format": f"bestvideo[height<={qualite.rstrip('p')}]+bestaudio/best",
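The ydl_opts naming suggests yt-dlp underneath. A minimal sketch of how options like the ones above would drive it; the URL is a placeholder and yt_dlp must be installed (pip install yt-dlp):

from pathlib import Path
import yt_dlp

saison_folder = Path.home() / "MesAnimes" / "Exemple" / "Saison 01"
qualite = "720p"
ydl_opts = {
    "outtmpl": str(saison_folder / "Exemple - S01E01.%(ext)s"),
    # Cap the video height at the chosen quality, falling back to "best"
    "format": f"bestvideo[height<={qualite.rstrip('p')}]+bestaudio/best",
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(["https://cdn.example.com/v/123/master.m3u8"])  # placeholder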
@@ -224,12 +266,15 @@ def handle_multi_download(stdscr, conn):
         stdscr.addstr(0, 0, f"Erreur récupération des sources: {e}")
         stdscr.getch()
         return
-    if not episode_data:
+    if not episode_data or not episode_data[0][0]:
         stdscr.clear()
-        stdscr.addstr(0, 0, "Aucun épisode trouvé.")
+        stdscr.addstr(0, 0, "Aucune source trouvée.")
         stdscr.getch()
         return

+    # Choose the global source (site) once, up front
+    sel_src, urls_par_source = choisir_source_globale(stdscr, episode_data)
+
     selected = [False] * len(episode_data)
     cursor = 0
     scroll_offset = 0
@@ -302,16 +347,9 @@ def handle_multi_download(stdscr, conn):
     base_folder = Path(CONFIG["download_dir"]) / titre
     download_queue = []
     for idx in to_download:
-        sources, _ = episode_data[idx]
-        infos = []
-        for src in sources:
-            info = get_source_info(src)
-            if info:
-                infos.append(info)
-        if not infos:
+        if idx >= len(urls_par_source):
             continue
-        sel_src = choisir_source(stdscr, infos) if len(infos) > 1 else 0
-        chosen_url = infos[sel_src]['url']
+        chosen_url = urls_par_source[idx]
         saison_str = f"S{int(saison):02d}"
         ep_str = f"E{idx+1:02d}"
         saison_folder = base_folder / f"Saison {int(saison):02d}"
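Taken together, the commit replaces the per-episode source prompt with one up-front site choice. A sketch of the resulting data flow, with episode_data laid out as the diff implies (each entry holding a parallel list of source URLs, one per site; the URLs are hypothetical):

episode_data = [
    (["https://site-a.example/ep1.html", "https://site-b.example/ep1.html"],),
    (["https://site-a.example/ep2.html", "https://site-b.example/ep2.html"],),
]

# choisir_source_globale returns (selected_site, that site's URL per episode);
# emulate a selection of site 0 here
sel = 0
urls_par_source = [ep[0][sel] for ep in episode_data if len(ep[0]) > sel]

for idx, _ in enumerate(episode_data):
    if idx >= len(urls_par_source):
        continue
    chosen_url = urls_par_source[idx]
    print(f"E{idx+1:02d} -> {chosen_url}")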