
Commit

Merge branch 'master' into deldesir-patch-5
deldesir authored Jul 5, 2024
2 parents fda41c5 + b5e3c76 · commit aab3a75
Showing 3 changed files with 18 additions and 9 deletions.
2 changes: 0 additions & 2 deletions cps/editbooks.py
@@ -389,7 +389,6 @@ def move_mediafile(requested_file, current_user_name=None, shelf_id=None):
             db_book,
             input_authors,
             title_dir,
-            renamed_authors,
         ) = create_book_on_upload(modify_date, meta)

         # Comments need book id therefore only possible after flush
@@ -406,7 +405,6 @@ def move_mediafile(requested_file, current_user_name=None, shelf_id=None):
             input_authors[0],
             meta.file_path,
             title_dir + meta.extension.lower(),
-            renamed_author=renamed_authors,
         )

         move_coverfile(meta, db_book)
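The two deletions track a narrowed contract: create_book_on_upload now returns three values, and the follow-up call no longer forwards a renamed_author keyword. A minimal runnable sketch of the new unpacking, with a hypothetical stub standing in for the real function:

    def create_book_on_upload(modify_date, meta):
        # Hypothetical stub for cps.editbooks.create_book_on_upload;
        # the real function persists a Books row and derives these values.
        return "<db_book>", ["Jane Doe"], "Jane Doe/Example (1)"

    (
        db_book,
        input_authors,
        title_dir,
    ) = create_book_on_upload(None, None)
    print(input_authors[0], title_dir)  # Jane Doe Jane Doe/Example (1)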
10 changes: 7 additions & 3 deletions cps/tasks/download.py
@@ -15,14 +15,16 @@
 log = logger.create()

 class TaskDownload(CalibreTask):
-    def __init__(self, task_message, media_url, original_url, current_user_name, shelf_id):
+    def __init__(self, task_message, media_url, original_url, current_user_name, shelf_id, duration, live_status):
         super(TaskDownload, self).__init__(task_message)
         self.message = task_message
         self.media_url = media_url
         self.media_url_link = f'<a href="{media_url}" target="_blank">{media_url}</a>'
         self.original_url = original_url
         self.current_user_name = current_user_name
         self.shelf_id = shelf_id
+        self.duration = datetime.utcfromtimestamp(int(duration)).strftime("%H:%M:%S") if duration else "unknown"
+        self.live_status = live_status
         self.start_time = self.end_time = datetime.now()
         self.stat = STAT_WAITING
         self.progress = 0
@@ -54,6 +56,9 @@ def run(self, worker_thread):
         last_progress_time = datetime.now()
         fragment_stuck_timeout = 30  # seconds

+        self.message = f"Downloading {self.media_url_link}..."
+        if self.live_status == "was_live":
+            self.message += f" (formerly live video, duration {self.duration})"
         while p.poll() is None:
             self.end_time = datetime.now()
             # Check if there's data available to read
@@ -69,15 +74,14 @@
                 elif re.search(pattern_progress, line):
                     percentage = int(re.search(r'\d+', line).group())
                     if percentage < 100:
-                        self.message = f"Downloading {self.media_url_link}..."
                         self.progress = min(0.99, (complete_progress_cycle + (percentage / 100)) / 4)
                     if percentage == 100:
                         complete_progress_cycle += 1
                         last_progress_time = datetime.now()
             else:
                 elapsed_time = (datetime.now() - last_progress_time).total_seconds()
                 if elapsed_time >= fragment_stuck_timeout:
-                    self.message = f"Downloading {self.media_url_link}... (This is taking longer than expected)"
+                    self.message += "<br>Some fragments are taking longer than expected to download. Please wait..."

             sleep(0.1)
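A side note on the new duration handling in TaskDownload.__init__: the seconds count is treated as a Unix timestamp and only the time-of-day part is kept, which yields an HH:MM:SS string but silently wraps for anything 24 hours or longer. A standalone sketch of the expression (the function name here is ours, not the module's):

    from datetime import datetime

    def format_duration(duration):
        # Same expression as in TaskDownload.__init__ above.
        return datetime.utcfromtimestamp(int(duration)).strftime("%H:%M:%S") if duration else "unknown"

    print(format_duration("5025"))  # 01:23:45
    print(format_duration(None))    # unknown (falsy duration)
    print(format_duration(90000))   # 01:00:00, not 25:00:00 -- the 24h wrap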
15 changes: 11 additions & 4 deletions cps/tasks/metadata_extract.py
@@ -68,9 +68,9 @@ def _fetch_requested_urls(self, conn):
                  else "SELECT path, duration, live_status FROM media WHERE path LIKE 'http%' AND time_created > ?")
         rows = conn.execute(query, (int(self.start_time.timestamp()),)).fetchall()
         requested_urls = {}
-        for path, duration in rows:
+        for path, duration, live_status in rows:
             if duration is not None and duration > 0:
-                requested_urls[path] = {"duration": duration}
+                requested_urls[path] = {"duration": duration, "live_status": live_status}
             else:
                 self.unavailable.append(path)
         return requested_urls
@@ -125,10 +125,10 @@ def _sort_and_limit_requested_urls(self, requested_urls):
         return dict(sorted(requested_urls.items(), key=lambda item: item[1]["views_per_day"], reverse=True)[:min(MAX_VIDEOS_PER_DOWNLOAD, len(requested_urls))])

     def _add_download_tasks_to_worker(self, requested_urls):
-        for index, requested_url in enumerate(requested_urls.keys()):
+        for index, (requested_url, url_data) in enumerate(requested_urls.items()):
             task_download = TaskDownload(_("Downloading %(url)s...", url=requested_url),
                                          requested_url, self.original_url,
-                                         self.current_user_name, self.shelf_id)
+                                         self.current_user_name, self.shelf_id, duration=str(url_data["duration"]), live_status=url_data["live_status"])
             WorkerThread.add(self.current_user_name, task_download)
         num_requested_urls = len(requested_urls)
         total_duration = sum(url_data["duration"] for url_data in requested_urls.values())
@@ -140,6 +140,13 @@ def _add_download_tasks_to_worker(self, requested_urls):
             self.message += f"<br><br>Shelf Title: <a href='{shelf_url}' target='_blank'>{self.shelf_title}</a>"
         if self.unavailable:
             self.message += "<br><br>Unavailable Video(s):<br>" + "<br>".join(f'<a href="{url}" target="_blank">{url}</a>' for url in self.unavailable)
+        upcoming_live_urls = [url for url, url_data in requested_urls.items() if url_data["live_status"] == "is_upcoming"]
+        live_urls = [url for url, url_data in requested_urls.items() if url_data["live_status"] == "is_live"]
+        if upcoming_live_urls:
+            self.message += "<br><br>Upcoming Live Video(s):<br>" + "<br>".join(f'<a href="{url}" target="_blank">{url}</a>' for url in upcoming_live_urls)
+        if live_urls:
+            self.message += "<br><br>Live Video(s):<br>" + "<br>".join(f'<a href="{url}" target="_blank">{url}</a>' for url in live_urls)
+

     def run(self, worker_thread):
         self.worker_thread = worker_thread
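Net effect in this file: each media row's live_status now rides along with its duration, is handed to TaskDownload, and drives two extra sections in the summary message. A small self-contained sketch of the data shape and the partitioning (URLs and values are illustrative, not from the commit):

    # Illustrative shape of requested_urls after _fetch_requested_urls;
    # the live_status strings match the ones tested in the diff above.
    requested_urls = {
        "https://example.com/watch?v=aaa": {"duration": 5025, "live_status": "was_live"},
        "https://example.com/watch?v=bbb": {"duration": 60, "live_status": "is_live"},
        "https://example.com/watch?v=ccc": {"duration": 60, "live_status": "is_upcoming"},
    }

    upcoming_live_urls = [url for url, d in requested_urls.items() if d["live_status"] == "is_upcoming"]
    live_urls = [url for url, d in requested_urls.items() if d["live_status"] == "is_live"]
    print(upcoming_live_urls)  # ['https://example.com/watch?v=ccc']
    print(live_urls)           # ['https://example.com/watch?v=bbb']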
