Skip to content

Commit

Permalink
fix: moved all uploads to use upload_file
Browse files — browse the repository at this point in the history
  • Loading branch information
rainmanjam committed Oct 11, 2024
1 parent a7e50dd commit 81c98ca
Show file tree
Hide file tree
Showing 6 changed files with 53 additions and 60 deletions.
3 changes: 0 additions & 3 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,5 @@ google-auth-httplib2
google-api-python-client
google-cloud-storage
psutil
<<<<<<< HEAD
boto3
=======
Pillow
>>>>>>> build
22 changes: 11 additions & 11 deletions routes/audio_mixing.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,9 @@
from flask import Blueprint, request, jsonify
from flask import current_app
from flask import Blueprint
from app_utils import *
import uuid
import threading
import logging
from services.audio_mixing import process_audio_mixing
from services.authentication import authenticate

from services.gcp_toolkit import upload_to_gcs # Ensure this import is present
from services.cloud_storage import upload_file

audio_mixing_bp = Blueprint('audio_mixing', __name__)
logger = logging.getLogger(__name__)
Expand All @@ -30,7 +26,6 @@
})
@queue_task_wrapper(bypass_queue=False)
def audio_mixing(job_id, data):

video_url = data.get('video_url')
audio_url = data.get('audio_url')
video_vol = data.get('video_vol', 100)
Expand All @@ -42,14 +37,19 @@ def audio_mixing(job_id, data):
logger.info(f"Job {job_id}: Received audio mixing request for {video_url} and {audio_url}")

try:
# Process audio and video mixing
output_filename = process_audio_mixing(
video_url, audio_url, video_vol, audio_vol, output_length, job_id, webhook_url
)
gcs_url = upload_to_gcs(output_filename)

return gcs_url, "/audio-mixing", 200
# Upload the mixed file using the unified upload_file() method
cloud_url = upload_file(output_filename)

logger.info(f"Job {job_id}: Mixed media uploaded to cloud storage: {cloud_url}")

# Return the cloud URL for the uploaded file
return cloud_url, "/audio-mixing", 200

except Exception as e:

logger.error(f"Job {job_id}: Error during audio mixing process - {str(e)}")
return str(e), "/audio-mixing", 500

34 changes: 8 additions & 26 deletions routes/caption_video.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,7 @@
import logging
from services.caption_video import process_captioning
from services.authentication import authenticate
from services.gcp_toolkit import upload_to_gcs, GCP_BUCKET_NAME, gcs_client
from services.s3_toolkit import upload_to_s3
from services.cloud_storage import upload_file
import os

caption_bp = Blueprint('caption', __name__)
Expand Down Expand Up @@ -39,7 +38,7 @@
def caption_video(job_id, data):
video_url = data['video_url']
caption_srt = data['srt']
caption_type= data.get('caption_type', 'srt')
caption_type = data.get('caption_type', 'srt')
options = data.get('options', [])
webhook_url = data.get('webhook_url')
id = data.get('id')
Expand All @@ -48,34 +47,17 @@ def caption_video(job_id, data):
logger.info(f"Job {job_id}: Options received: {options}")

try:
# Process captioning
output_filename = process_captioning(video_url, caption_srt, caption_type, options, job_id)
logger.info(f"Job {job_id}: Captioning process completed successfully")

# Determine which storage provider to use
s3_url = os.getenv('S3_URL')
s3_access_key = os.getenv('S3_ACCESS_KEY')
s3_secret_key = os.getenv('S3_SECRET_KEY')
gcp_bucket_name = os.getenv('GCP_BUCKET_NAME')
# Upload the captioned video using the unified upload_file() method
cloud_url = upload_file(output_filename)

if s3_url and s3_access_key and s3_secret_key:
# Log S3 environment variables for debugging
logger.info(f"Job {job_id}: S3_URL={s3_url}, S3_ACCESS_KEY={s3_access_key}, S3_SECRET_KEY={s3_secret_key}")

# Upload to S3
cloud_url = upload_to_s3(output_filename, s3_url, s3_access_key, s3_secret_key)
elif gcp_bucket_name and gcs_client:
# Log GCP environment variables for debugging
logger.info(f"Job {job_id}: GCP_BUCKET_NAME={gcp_bucket_name}")

# Upload to GCS
cloud_url = upload_to_gcs(output_filename, gcp_bucket_name)
else:
raise ValueError("No valid storage provider is configured. Ensure either S3 or GCP environment variables are set.")

logger.info(f"Job {job_id}: File uploaded to cloud storage: {cloud_url}")
logger.info(f"Job {job_id}: Captioned video uploaded to cloud storage: {cloud_url}")

# Return the cloud URL for the uploaded file
return cloud_url, "/caption-video", 200

except Exception as e:
logger.error(f"Job {job_id}: Error during captioning process - {str(e)}", exc_info=True)
return str(e), "/caption-video", 500
return str(e), "/caption-video", 500
24 changes: 16 additions & 8 deletions routes/extract_keyframes.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
from flask import Blueprint, request, jsonify
from flask import current_app
from flask import Blueprint
from app_utils import *
import logging
from services.extract_keyframes import process_keyframe_extraction
from services.authentication import authenticate
from services.gcp_toolkit import upload_to_gcs
from services.cloud_storage import upload_file

extract_keyframes_bp = Blueprint('extract_keyframes', __name__)
logger = logging.getLogger(__name__)
Expand All @@ -30,11 +29,20 @@ def extract_keyframes(job_id, data):
logger.info(f"Job {job_id}: Received keyframe extraction request for {video_url}")

try:
image_urls = process_keyframe_extraction(video_url, job_id)
response = {"image_urls": [{"image_url": url} for url in image_urls]}

return response, "/extract-keyframes", 200
# Process keyframe extraction
image_paths = process_keyframe_extraction(video_url, job_id)

# Upload each extracted keyframe and collect the cloud URLs
image_urls = []
for image_path in image_paths:
cloud_url = upload_file(image_path)
image_urls.append({"image_url": cloud_url})

logger.info(f"Job {job_id}: Keyframes uploaded to cloud storage")

# Return the URLs of the uploaded keyframes
return {"image_urls": image_urls}, "/extract-keyframes", 200

except Exception as e:
logger.error(f"Job {job_id}: Error during keyframe extraction - {str(e)}")
return str(e), "/extract-keyframes", 500
return str(e), "/extract-keyframes", 500
20 changes: 13 additions & 7 deletions routes/image_to_video.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
from flask import Blueprint, request, jsonify
from flask import current_app
from flask import Blueprint
from app_utils import *
import logging
from services.image_to_video import process_image_to_video
from services.authentication import authenticate
from services.gcp_toolkit import upload_to_gcs
from services.cloud_storage import upload_file

image_to_video_bp = Blueprint('image_to_video', __name__)
logger = logging.getLogger(__name__)
Expand All @@ -29,20 +28,27 @@ def image_to_video(job_id, data):
image_url = data.get('image_url')
length = data.get('length', 5)
frame_rate = data.get('frame_rate', 30)
zoom_speed = data.get('zoom_speed', 3)/100
zoom_speed = data.get('zoom_speed', 3) / 100
webhook_url = data.get('webhook_url')
id = data.get('id')

logger.info(f"Job {job_id}: Received image to video request for {image_url}")

try:
# Process image to video conversion
output_filename = process_image_to_video(
image_url, length, frame_rate, zoom_speed, job_id, webhook_url
)
gcs_url = upload_to_gcs(output_filename)

return gcs_url, "/image-to-video", 200
# Upload the resulting file using the unified upload_file() method
cloud_url = upload_file(output_filename)

# Log the successful upload
logger.info(f"Job {job_id}: Converted video uploaded to cloud storage: {cloud_url}")

# Return the cloud URL for the uploaded file
return cloud_url, "/image-to-video", 200

except Exception as e:
logger.error(f"Job {job_id}: Error processing image to video: {str(e)}", exc_info=True)
return str(e), "/image-to-video", 500
return str(e), "/image-to-video", 500
10 changes: 5 additions & 5 deletions routes/transcribe_media.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
import os
from services.transcription import process_transcription
from services.authentication import authenticate
from services.gcp_toolkit import upload_to_gcs
from services.cloud_storage import upload_file

transcribe_bp = Blueprint('transcribe', __name__)
logger = logging.getLogger(__name__)
Expand Down Expand Up @@ -37,14 +37,14 @@ def transcribe(job_id, data):
result = process_transcription(media_url, output, max_chars)
logger.info(f"Job {job_id}: Transcription process completed successfully")

# If the result is a file path, upload it to GCS
# If the result is a file path, upload it using the unified upload_file() method
if output in ['srt', 'vtt', 'ass']:
gcs_url = upload_to_gcs(result)
cloud_url = upload_file(result)
os.remove(result) # Remove the temporary file after uploading
return gcs_url, "/transcribe-media", 200
return cloud_url, "/transcribe-media", 200
else:
return result, "/transcribe-media", 200

except Exception as e:
logger.error(f"Job {job_id}: Error during transcription process - {str(e)}")
return str(e), "/transcribe-media", 500
return str(e), "/transcribe-media", 500

0 comments on commit 81c98ca

Please sign in to comment.