[cli/downloader] Migrate to Queue based logging for subprocesses

derrod 2020-04-26 13:19:28 +02:00
parent 035e23b964
commit 0485a728e3
3 changed files with 136 additions and 64 deletions

legendary/cli.py

@@ -3,32 +3,41 @@
 import argparse
 import logging
-import multiprocessing
 import os
 import shlex
 import subprocess
 import time
 import webbrowser

+from logging.handlers import QueueHandler, QueueListener
+from multiprocessing import freeze_support, Queue as MPQueue
 from sys import exit

 from legendary.core import LegendaryCore
 from legendary.models.exceptions import InvalidCredentialsError

+# todo custom formatter for cli logger (clean info, highlighted error/warning)
 logging.basicConfig(
-    format='[%(asctime)s] [%(name)s] %(levelname)s: %(message)s',
+    format='[%(name)s] %(levelname)s: %(message)s',
     level=logging.INFO
 )
 logger = logging.getLogger('cli')

-# todo logger with QueueHandler/QueueListener
-# todo custom formatter for cli logger (clean info, highlighted error/warning)

 class LegendaryCLI:
     def __init__(self):
         self.core = LegendaryCore()
         self.logger = logging.getLogger('cli')
+        self.logging_queue = None
+
+    def setup_threaded_logging(self):
+        self.logging_queue = MPQueue(-1)
+        shandler = logging.StreamHandler()
+        sformatter = logging.Formatter('[%(asctime)s] [%(name)s] %(levelname)s: %(message)s')
+        shandler.setFormatter(sformatter)
+        ql = QueueListener(self.logging_queue, shandler)
+        ql.start()
+        return ql

     def auth(self, args):
         try:
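The new setup_threaded_logging() is the standard QueueHandler/QueueListener recipe from the Python logging cookbook: the CLI process owns the only real StreamHandler and drains a multiprocessing queue on a background listener thread, while subprocesses simply enqueue their log records. A minimal, self-contained sketch of the pattern (run_worker and all names here are illustrative, not legendary code):

import logging
from logging.handlers import QueueHandler, QueueListener
from multiprocessing import Process, Queue

def run_worker(logging_queue):
    # child process: route all records to the parent via the queue
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.handlers = []
    root.addHandler(QueueHandler(logging_queue))
    logging.getLogger('worker').info('Hello from the child process!')

if __name__ == '__main__':
    q = Queue(-1)  # maxsize <= 0 means unbounded, matching the cookbook's Queue(-1) idiom
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('[%(asctime)s] [%(name)s] %(levelname)s: %(message)s'))
    listener = QueueListener(q, handler)
    listener.start()  # consumes records from q on a background thread

    p = Process(target=run_worker, args=(q,))
    p.start()
    p.join()

    listener.stop()  # processes any remaining queued records before exiting

The ql.stop() call added to main() further down serves the same purpose as listener.stop() here: flushing whatever is still queued before the CLI exits.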
@@ -223,6 +232,10 @@ class LegendaryCLI:
         start_t = time.time()

         try:
+            # set up logging stuff (should be moved somewhere else later)
+            dlm.logging_queue = self.logging_queue
+            dlm.proc_debug = args.dlm_debug
+
             dlm.start()
             dlm.join()
         except Exception as e:
@@ -243,7 +256,7 @@
                 print(f' - {dlc.app_title} (App name: {dlc.app_name}, version: {dlc.app_version})')
             # todo recursively call install with modified args to install DLC automatically (after confirm)
             print('Installing DLCs works the same as the main game, just use the DLC app name instead.')
-            print('Automatic installation of DLC is currently not supported.')
+            print('(Automatic installation of DLC is currently not supported.)')

         if postinstall:
             self._handle_postinstall(postinstall, igame, yes=args.yes)
@@ -354,6 +367,8 @@ def main():
                                 help='Do not mark game as intalled and do not run prereq installers after download.')
     install_parser.add_argument('--update-only', dest='update_pnly', action='store_true',
                                 help='Abort if game is not already installed (for automation)')
+    install_parser.add_argument('--dlm-debug', dest='dlm_debug', action='store_true',
+                                help='Set download manager and worker processes\' loglevel to debug')

     launch_parser.add_argument('--offline', dest='offline', action='store_true',
                                 default=False, help='Skip login and launch game without online authentication')
@@ -381,6 +396,7 @@ def main():
         return

     cli = LegendaryCLI()
+    ql = cli.setup_threaded_logging()

     config_ll = cli.core.lgd.config.get('Legendary', 'log_level', fallback='info')
     if config_ll == 'debug' or args.debug:
@@ -391,23 +407,28 @@ def main():
     # technically args.func() with setdefaults could work (see docs on subparsers)
     # but that would require all funcs to accept args and extra...
-    if args.subparser_name == 'auth':
-        cli.auth(args)
-    elif args.subparser_name == 'list-games':
-        cli.list_games()
-    elif args.subparser_name == 'list-installed':
-        cli.list_installed(args)
-    elif args.subparser_name == 'launch':
-        cli.launch_game(args, extra)
-    elif args.subparser_name == 'download':
-        cli.install_game(args)
-    elif args.subparser_name == 'uninstall':
-        cli.uninstall_game(args)
+    try:
+        if args.subparser_name == 'auth':
+            cli.auth(args)
+        elif args.subparser_name == 'list-games':
+            cli.list_games()
+        elif args.subparser_name == 'list-installed':
+            cli.list_installed(args)
+        elif args.subparser_name == 'launch':
+            cli.launch_game(args, extra)
+        elif args.subparser_name == 'download':
+            cli.install_game(args)
+        elif args.subparser_name == 'uninstall':
+            cli.uninstall_game(args)
+    except KeyboardInterrupt:
+        logger.info('Command was aborted via KeyboardInterrupt, cleaning up...')

     cli.core.exit()
+    ql.stop()
     exit(0)


 if __name__ == '__main__':
-    multiprocessing.freeze_support()  # required for pyinstaller
+    # required for pyinstaller on Windows, does nothing on other platforms.
+    freeze_support()
     main()
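On Windows (and any spawn-based platform) multiprocessing children re-execute the program's entry point, and in a PyInstaller-frozen binary that would re-run the whole application instead of the worker bootstrap; freeze_support() intercepts exactly that, which is why it must be the first statement under the __main__ guard. A short sketch of the required shape of the entry point (the work function is illustrative):

from multiprocessing import Process, freeze_support

def work():
    print('hello from the child process')

if __name__ == '__main__':
    freeze_support()  # no-op unless running as a frozen Windows executable
    p = Process(target=work)
    p.start()
    p.join()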

legendary/downloader/manager.py

@@ -8,6 +8,7 @@ import os
 import time

 from collections import Counter, defaultdict, deque
+from logging.handlers import QueueHandler
 from multiprocessing import cpu_count, Process, Queue as MPQueue
 from multiprocessing.shared_memory import SharedMemory
 from queue import Empty
@@ -24,14 +25,15 @@ class DLManager(Process):
                  max_jobs=100, max_failures=5, max_workers=0, update_interval=1.0,
                  max_shared_memory=1024 * 1024 * 1024, resume_file=None):
         super().__init__(name='DLManager')
-        self.log = logging.getLogger('DLManager')
-        self.log_level = self.log.level
+        self.log = logging.getLogger('DLM')
+        self.proc_debug = False

         self.base_url = base_url
         self.dl_dir = download_dir
         self.cache_dir = cache_dir if cache_dir else os.path.join(download_dir, '.cache')

         # All the queues!
+        self.logging_queue = None
         self.dl_worker_queue = None
         self.writer_queue = None
         self.dl_result_q = None
@@ -63,6 +65,8 @@
         self.running = True
         self.active_tasks = 0
         self.children = []
+        self.threads = []
+        self.conditions = []
         # bytes downloaded and decompressed since last report
         self.bytes_downloaded_since_last = 0
         self.bytes_decompressed_since_last = 0
@@ -451,19 +455,43 @@
         if not self.analysis:
             raise ValueError('Did not run analysis before trying to run download!')

-        # fix loglevel in subprocess
-        self.log.setLevel(self.log_level)
+        # Subprocess will use its own root logger that logs to a Queue instead
+        _root = logging.getLogger()
+        _root.setLevel(logging.DEBUG if self.proc_debug else logging.INFO)
+        if self.logging_queue:
+            _root.handlers = []
+            _root.addHandler(QueueHandler(self.logging_queue))
+
+        self.log = logging.getLogger('DLMProc')
+        self.log.info(f'Download Manager running with process-id: {os.getpid()}')

         try:
             self.run_real()
         except KeyboardInterrupt:
             self.log.warning('Immediate exit requested!')
             self.running = False
-            for proc in self.children:
-                try:
-                    proc.terminate()
-                except Exception as e:
-                    print(f'Terminating process {repr(proc)} failed: {e!r}')
+
+            # send conditions to unlock threads if they aren't already
+            for cond in self.conditions:
+                with cond:
+                    cond.notify()
+
+            # make sure threads are dead.
+            for t in self.threads:
+                t.join(timeout=5.0)
+                if t.is_alive():
+                    self.log.warning(f'Thread did not terminate! {repr(t)}')
+
+            # clean up all the queues, otherwise this process won't terminate properly
+            for name, q in zip(('Download jobs', 'Writer jobs', 'Download results', 'Writer results'),
+                               (self.dl_worker_queue, self.writer_queue, self.dl_result_q, self.writer_result_q)):
+                self.log.debug(f'Cleaning up queue "{name}"')
+                try:
+                    while True:
+                        _ = q.get_nowait()
+                except Empty:
+                    q.close()
+                    q.join_thread()

     def run_real(self):
         self.shared_memory = SharedMemory(create=True, size=self.max_shared_memory)
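Two things happen in the rewritten run(), which executes in the DLManager child process: the child replaces its root logger's handlers with a single QueueHandler pointed at the CLI's queue, and on interrupt it drains and closes its own multiprocessing queues. The latter matters because, per the multiprocessing docs, a process that has put data on a Queue will not terminate until the queue's feeder thread has flushed that data; get_nowait() until Empty followed by close() and join_thread() lets the process exit cleanly. (The MPQueue(-1) calls in the next hunk create these queues without a size limit.) A standalone illustration of the cleanup, not legendary code:

import time
from multiprocessing import Queue
from queue import Empty

def drain_and_close(q):
    # discard anything still buffered, then shut down the feeder thread
    try:
        while True:
            _ = q.get_nowait()
    except Empty:
        q.close()         # no more data will be put on this queue
        q.join_thread()   # wait for the feeder thread to flush and exit

if __name__ == '__main__':
    q = Queue(-1)
    for i in range(10):
        q.put(i)
    time.sleep(0.1)  # give the feeder thread a moment to move items into the pipe
    drain_and_close(q)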
@@ -478,21 +506,22 @@
         self.log.debug(f'Created {len(self.sms)} shared memory segments.')

         # Create queues
-        self.dl_worker_queue = MPQueue()
-        self.writer_queue = MPQueue()
-        self.dl_result_q = MPQueue()
-        self.writer_result_q = MPQueue()
+        self.dl_worker_queue = MPQueue(-1)
+        self.writer_queue = MPQueue(-1)
+        self.dl_result_q = MPQueue(-1)
+        self.writer_result_q = MPQueue(-1)

         self.log.info(f'Starting download workers...')
         for i in range(self.max_workers):
-            w = DLWorker(f'DLWorker {i + 1}', self.dl_worker_queue,
-                         self.dl_result_q, self.shared_memory.name)
+            w = DLWorker(f'DLWorker {i + 1}', self.dl_worker_queue, self.dl_result_q,
+                         self.shared_memory.name, logging_queue=self.logging_queue)
             self.children.append(w)
             w.start()

         self.log.info('Starting file writing worker...')
         writer_p = FileWorker(self.writer_queue, self.writer_result_q, self.dl_dir,
-                              self.shared_memory.name, self.cache_dir)
+                              self.shared_memory.name, self.cache_dir, self.logging_queue)
+        self.children.append(writer_p)
         writer_p.start()

         num_chunk_tasks = sum(isinstance(t, ChunkTask) for t in self.tasks)
@@ -511,14 +540,15 @@
         # synchronization conditions
         shm_cond = Condition()
         task_cond = Condition()
+        self.conditions = [shm_cond, task_cond]

         # start threads
         s_time = time.time()
-        dlj_e = Thread(target=self.download_job_manager, args=(task_cond, shm_cond))
-        dlr_e = Thread(target=self.dl_results_handler, args=(task_cond,))
-        fwr_e = Thread(target=self.fw_results_handler, args=(shm_cond,))
+        self.threads.append(Thread(target=self.download_job_manager, args=(task_cond, shm_cond)))
+        self.threads.append(Thread(target=self.dl_results_handler, args=(task_cond,)))
+        self.threads.append(Thread(target=self.fw_results_handler, args=(shm_cond,)))

-        for t in (dlj_e, dlr_e, fwr_e):
+        for t in self.threads:
             t.start()

         last_update = time.time()
@@ -597,7 +627,7 @@
                 child.terminate()

         # make sure all the threads are dead.
-        for t in (dlj_e, dlr_e, fwr_e):
+        for t in self.threads:
             t.join(timeout=5.0)
             if t.is_alive():
                 self.log.warning(f'Thread did not terminate! {repr(t)}')
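Promoting the worker threads and their Condition objects to instance attributes (self.threads, self.conditions) is what makes the new KeyboardInterrupt path in run() possible: it can wake any thread still blocked in Condition.wait() and then join it with a timeout. The wait/notify handshake, reduced to a standalone sketch (not legendary code):

from threading import Condition, Thread

cond = Condition()
running = True

def worker():
    while running:
        with cond:
            # the timeout guards against a notify that fires before we wait
            cond.wait(timeout=1.0)
        # ... do work ...

if __name__ == '__main__':
    t = Thread(target=worker)
    t.start()

    # shutdown path, mirroring DLManager.run():
    running = False
    with cond:
        cond.notify()       # unblock the thread if it is currently waiting
    t.join(timeout=5.0)
    if t.is_alive():
        print(f'Thread did not terminate! {t!r}')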

legendary/downloader/workers.py

@@ -6,6 +6,7 @@ import requests
 import time
 import logging

+from logging.handlers import QueueHandler
 from multiprocessing import Process
 from multiprocessing.shared_memory import SharedMemory
 from queue import Empty
@@ -15,7 +16,7 @@ from legendary.models.downloading import DownloaderTaskResult, WriterTaskResult
 class DLWorker(Process):
-    def __init__(self, name, queue, out_queue, shm, max_retries=5):
+    def __init__(self, name, queue, out_queue, shm, max_retries=5, logging_queue=None):
         super().__init__(name=name)
         self.q = queue
         self.o_q = out_queue
@@ -25,9 +26,19 @@
         })
         self.max_retries = max_retries
         self.shm = SharedMemory(name=shm)
-        self.log = logging.getLogger('DLWorker')
+        self.log_level = logging.getLogger().level
+        self.logging_queue = logging_queue

     def run(self):
+        # we have to fix up the logger before we can start
+        _root = logging.getLogger()
+        _root.handlers = []
+        _root.addHandler(QueueHandler(self.logging_queue))
+
+        logger = logging.getLogger(self.name)
+        logger.setLevel(self.log_level)
+        logger.debug(f'Download worker reporting for duty!')
+
         empty = False
         while True:
             try:
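A detail worth noting in the worker changes: __init__ runs in the parent, while run() executes in the child, so anything the child needs must travel as picklable instance state. The workers therefore snapshot the parent's root log level into self.log_level at construction time and re-apply it after installing the QueueHandler; under the spawn start method the child does not inherit the parent's logging configuration, and under fork it would inherit handlers that write directly to the console. Condensed to its essentials (illustrative sketch, not the full worker):

import logging
from logging.handlers import QueueHandler
from multiprocessing import Process

class Worker(Process):
    def __init__(self, logging_queue):
        super().__init__(name='Worker')
        # evaluated in the parent process, pickled over to the child
        self.log_level = logging.getLogger().level
        self.logging_queue = logging_queue

    def run(self):
        # executes in the child: rebuild logging from the pickled state
        root = logging.getLogger()
        root.handlers = []
        root.addHandler(QueueHandler(self.logging_queue))
        logger = logging.getLogger(self.name)
        logger.setLevel(self.log_level)
        logger.debug('Worker reporting for duty!')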
@@ -35,12 +46,12 @@
                 empty = False
             except Empty:
                 if not empty:
-                    self.log.debug(f'[{self.name}] Queue Empty, waiting for more...')
+                    logger.debug(f'[{self.name}] Queue Empty, waiting for more...')
                 empty = True
                 continue

             if job.kill:  # let worker die
-                self.log.info(f'[{self.name}] Worker received kill signal, shutting down...')
+                logger.debug(f'[{self.name}] Worker received kill signal, shutting down...')
                 break

             tries = 0
@@ -51,19 +62,19 @@
             try:
                 while tries < self.max_retries:
                     # print('Downloading', job.url)
-                    self.log.debug(f'[{self.name}] Downloading {job.url}')
+                    logger.debug(f'[{self.name}] Downloading {job.url}')
                     dl_start = time.time()

                     try:
                         r = self.session.get(job.url, timeout=5.0)
                         r.raise_for_status()
                     except Exception as e:
-                        self.log.warning(f'[{self.name}] Chunk download failed ({e!r}), retrying...')
+                        logger.warning(f'[{self.name}] Chunk download failed ({e!r}), retrying...')
                         continue

                     dl_end = time.time()
                     if r.status_code != 200:
-                        self.log.warning(f'[{self.name}] Chunk download failed (Status {r.status_code}), retrying...')
+                        logger.warning(f'[{self.name}] Chunk download failed (Status {r.status_code}), retrying...')
                         continue
                     else:
                         compressed = len(r.content)
@@ -72,12 +83,12 @@
                 else:
                     raise TimeoutError('Max retries reached')
             except Exception as e:
-                self.log.error(f'[{self.name}] Job failed with: {e!r}, fetching next one...')
+                logger.error(f'[{self.name}] Job failed with: {e!r}, fetching next one...')
                 # add failed job to result queue to be requeued
                 self.o_q.put(DownloaderTaskResult(success=False, chunk_guid=job.guid, shm=job.shm, url=job.url))

             if not chunk:
-                self.log.warning(f'[{self.name}] Chunk smoehow None?')
+                logger.warning(f'[{self.name}] Chunk smoehow None?')
                 self.o_q.put(DownloaderTaskResult(success=False, chunk_guid=job.guid, shm=job.shm, url=job.url))
                 continue
@@ -85,7 +96,7 @@
             try:
                 size = len(chunk.data)
                 if size > job.shm.size:
-                    self.log.fatal(f'Downloaded chunk is longer than SharedMemorySegment!')
+                    logger.fatal(f'Downloaded chunk is longer than SharedMemorySegment!')

                 self.shm.buf[job.shm.offset:job.shm.offset + size] = bytes(chunk.data)
                 del chunk
@@ -93,7 +104,7 @@
                                                   url=job.url, size=size, compressed_size=compressed,
                                                   time_delta=dl_end - dl_start))
             except Exception as e:
-                self.log.warning(f'[{self.name}] Job failed with: {e!r}, fetching next one...')
+                logger.warning(f'[{self.name}] Job failed with: {e!r}, fetching next one...')
                 self.o_q.put(DownloaderTaskResult(success=False, chunk_guid=job.guid, shm=job.shm, url=job.url))
                 continue
@@ -101,16 +112,26 @@
 class FileWorker(Process):
-    def __init__(self, queue, out_queue, base_path, shm, cache_path=None):
-        super().__init__(name='File worker')
+    def __init__(self, queue, out_queue, base_path, shm, cache_path=None, logging_queue=None):
+        super().__init__(name='FileWorker')
         self.q = queue
         self.o_q = out_queue
         self.base_path = base_path
         self.cache_path = cache_path if cache_path else os.path.join(base_path, '.cache')
         self.shm = SharedMemory(name=shm)
-        self.log = logging.getLogger('DLWorker')
+        self.log_level = logging.getLogger().level
+        self.logging_queue = logging_queue

     def run(self):
+        # we have to fix up the logger before we can start
+        _root = logging.getLogger()
+        _root.handlers = []
+        _root.addHandler(QueueHandler(self.logging_queue))
+
+        logger = logging.getLogger(self.name)
+        logger.setLevel(self.log_level)
+        logger.debug(f'Download worker reporting for duty!')
+
         last_filename = ''
         current_file = None
@@ -119,7 +140,7 @@
             try:
                 j = self.q.get(timeout=10.0)
             except Empty:
-                self.log.warning('Writer queue empty!')
+                logger.warning('Writer queue empty!')
                 continue

             if j.kill:
@@ -141,7 +162,7 @@
                 continue
             elif j.open:
                 if current_file:
-                    self.log.warning(f'Opening new file {j.filename} without closing previous! {last_filename}')
+                    logger.warning(f'Opening new file {j.filename} without closing previous! {last_filename}')
                     current_file.close()

                 current_file = open(full_path, 'wb')
@@ -154,27 +175,27 @@
                     current_file.close()
                     current_file = None
                 else:
-                    self.log.warning(f'Asking to close file that is not open: {j.filename}')
+                    logger.warning(f'Asking to close file that is not open: {j.filename}')

                 self.o_q.put(WriterTaskResult(success=True, filename=j.filename, closed=True))
                 continue
             elif j.rename:
                 if current_file:
-                    self.log.warning('Trying to rename file without closing first!')
+                    logger.warning('Trying to rename file without closing first!')
                     current_file.close()
                     current_file = None
                 if j.delete:
                     try:
                         os.remove(full_path)
                     except OSError as e:
-                        self.log.error(f'Removing file failed: {e!r}')
+                        logger.error(f'Removing file failed: {e!r}')
                         self.o_q.put(WriterTaskResult(success=False, filename=j.filename))
                         continue

                 try:
                     os.rename(os.path.join(self.base_path, j.old_filename), full_path)
                 except OSError as e:
-                    self.log.error(f'Renaming file failed: {e!r}')
+                    logger.error(f'Renaming file failed: {e!r}')
                     self.o_q.put(WriterTaskResult(success=False, filename=j.filename))
                     continue
@@ -182,14 +203,14 @@
                 continue
             elif j.delete:
                 if current_file:
-                    self.log.warning('Trying to delete file without closing first!')
+                    logger.warning('Trying to delete file without closing first!')
                     current_file.close()
                     current_file = None

                 try:
                     os.remove(full_path)
                 except OSError as e:
-                    self.log.error(f'Removing file failed: {e!r}')
+                    logger.error(f'Removing file failed: {e!r}')

                 self.o_q.put(WriterTaskResult(success=True, filename=j.filename))
                 continue
@@ -218,7 +239,7 @@
                     current_file.write(f.read(j.chunk_size))
                     post_write = time.time()
                 except Exception as e:
-                    self.log.warning(f'Something in writing a file failed: {e!r}')
+                    logger.warning(f'Something in writing a file failed: {e!r}')
                     self.o_q.put(WriterTaskResult(success=False, filename=j.filename,
                                                   chunk_guid=j.chunk_guid,
                                                   release_memory=j.release_memory,
@@ -231,7 +252,7 @@
                                                   shm=j.shm, size=j.chunk_size,
                                                   time_delta=post_write-pre_write))
             except Exception as e:
-                self.log.warning(f'[{self.name}] Job {j.filename} failed with: {e!r}, fetching next one...')
+                logger.warning(f'[{self.name}] Job {j.filename} failed with: {e!r}, fetching next one...')
                 self.o_q.put(WriterTaskResult(success=False, filename=j.filename, chunk_guid=j.chunk_guid))

                 try:
@@ -239,7 +260,7 @@
                         current_file.close()
                         current_file = None
                 except Exception as e:
-                    self.log.error(f'[{self.name}] Closing file after error failed: {e!r}')
+                    logger.error(f'[{self.name}] Closing file after error failed: {e!r}')
             except KeyboardInterrupt:
                 if current_file:
                     current_file.close()