diff --git a/cps/editbooks.py b/cps/editbooks.py
index 5764be589f..ae8ad698a5 100644
--- a/cps/editbooks.py
+++ b/cps/editbooks.py
@@ -389,7 +389,6 @@ def move_mediafile(requested_file, current_user_name=None, shelf_id=None):
db_book,
input_authors,
title_dir,
- renamed_authors,
) = create_book_on_upload(modify_date, meta)
# Comments need book id therefore only possible after flush
@@ -406,7 +405,6 @@ def move_mediafile(requested_file, current_user_name=None, shelf_id=None):
input_authors[0],
meta.file_path,
title_dir + meta.extension.lower(),
- renamed_author=renamed_authors,
)
move_coverfile(meta, db_book)
diff --git a/cps/tasks/download.py b/cps/tasks/download.py
index 50bbd8dfff..1db4d694a3 100644
--- a/cps/tasks/download.py
+++ b/cps/tasks/download.py
@@ -15,7 +15,7 @@
log = logger.create()
class TaskDownload(CalibreTask):
- def __init__(self, task_message, media_url, original_url, current_user_name, shelf_id):
+ def __init__(self, task_message, media_url, original_url, current_user_name, shelf_id, duration, live_status):
super(TaskDownload, self).__init__(task_message)
self.message = task_message
self.media_url = media_url
@@ -23,6 +23,8 @@ def __init__(self, task_message, media_url, original_url, current_user_name, she
self.original_url = original_url
self.current_user_name = current_user_name
self.shelf_id = shelf_id
+ self.duration = datetime.utcfromtimestamp(int(duration)).strftime("%H:%M:%S") if duration else "unknown"
+ self.live_status = live_status
self.start_time = self.end_time = datetime.now()
self.stat = STAT_WAITING
self.progress = 0
@@ -54,6 +56,9 @@ def run(self, worker_thread):
last_progress_time = datetime.now()
fragment_stuck_timeout = 30 # seconds
+ self.message = f"Downloading {self.media_url_link}..."
+ if self.live_status == "was_live":
+            self.message += f" (formerly live video, duration: {self.duration})"
while p.poll() is None:
self.end_time = datetime.now()
# Check if there's data available to read
@@ -69,7 +74,6 @@ def run(self, worker_thread):
elif re.search(pattern_progress, line):
percentage = int(re.search(r'\d+', line).group())
if percentage < 100:
- self.message = f"Downloading {self.media_url_link}..."
self.progress = min(0.99, (complete_progress_cycle + (percentage / 100)) / 4)
if percentage == 100:
complete_progress_cycle += 1
@@ -77,7 +81,7 @@ def run(self, worker_thread):
else:
elapsed_time = (datetime.now() - last_progress_time).total_seconds()
if elapsed_time >= fragment_stuck_timeout:
- self.message = f"Downloading {self.media_url_link}... (This is taking longer than expected)"
+                        self.message += f"\nSome fragments are taking longer than expected to download. Please wait..."
sleep(0.1)
diff --git a/cps/tasks/metadata_extract.py b/cps/tasks/metadata_extract.py
index 7aa7e92894..a69443b766 100644
--- a/cps/tasks/metadata_extract.py
+++ b/cps/tasks/metadata_extract.py
@@ -68,9 +68,9 @@ def _fetch_requested_urls(self, conn):
else "SELECT path, duration, live_status FROM media WHERE path LIKE 'http%' AND time_created > ?")
rows = conn.execute(query, (int(self.start_time.timestamp()),)).fetchall()
requested_urls = {}
- for path, duration in rows:
+ for path, duration, live_status in rows:
if duration is not None and duration > 0:
- requested_urls[path] = {"duration": duration}
+ requested_urls[path] = {"duration": duration, "live_status": live_status}
else:
self.unavailable.append(path)
return requested_urls
@@ -125,10 +125,10 @@ def _sort_and_limit_requested_urls(self, requested_urls):
return dict(sorted(requested_urls.items(), key=lambda item: item[1]["views_per_day"], reverse=True)[:min(MAX_VIDEOS_PER_DOWNLOAD, len(requested_urls))])
def _add_download_tasks_to_worker(self, requested_urls):
- for index, requested_url in enumerate(requested_urls.keys()):
+ for index, (requested_url, url_data) in enumerate(requested_urls.items()):
task_download = TaskDownload(_("Downloading %(url)s...", url=requested_url),
requested_url, self.original_url,
- self.current_user_name, self.shelf_id)
+ self.current_user_name, self.shelf_id, duration=str(url_data["duration"]), live_status=url_data["live_status"])
WorkerThread.add(self.current_user_name, task_download)
num_requested_urls = len(requested_urls)
total_duration = sum(url_data["duration"] for url_data in requested_urls.values())
@@ -140,6 +140,13 @@ def _add_download_tasks_to_worker(self, requested_urls):
             self.message += f"\nShelf Title: {self.shelf_title}"
if self.unavailable:
             self.message += "\nUnavailable Video(s):\n" + "\n".join(f'{url}' for url in self.unavailable)
+ upcoming_live_urls = [url for url, url_data in requested_urls.items() if url_data["live_status"] == "is_upcoming"]
+ live_urls = [url for url, url_data in requested_urls.items() if url_data["live_status"] == "is_live"]
+ if upcoming_live_urls:
+            self.message += "\nUpcoming Live Video(s):\n" + "\n".join(f'{url}' for url in upcoming_live_urls)
+ if live_urls:
+            self.message += "\nLive Video(s):\n" + "\n".join(f'{url}' for url in live_urls)
+
def run(self, worker_thread):
self.worker_thread = worker_thread