Thumbnail and metadata extraction

pull/9/head
Logan Williams 2021-03-18 11:03:13 +01:00
parent 99870d5287
commit 9070689d95
3 changed files with 170 additions and 52 deletions

Pipfile

@@ -9,6 +9,7 @@ boto3 = "*"
 python-dotenv = "*"
 youtube_dl = "*"
 argparse = "*"
+ffmpeg-python = "*"
 
 [dev-packages]
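A note on the one-line Pipfile change: ffmpeg-python is a thin wrapper that builds ffmpeg filter graphs out of chained Python calls (it shells out to the ffmpeg binary, which must be installed separately). A minimal sketch of the pattern the new code uses below, with a hypothetical input path:

import ffmpeg

# Sample one frame every two seconds (fps=0.5) and scale to 512px wide,
# keeping the aspect ratio (-1); writes out1.jpg, out2.jpg, ...
stream = ffmpeg.input('tmp/example.mp4')  # hypothetical file
stream = stream.filter('fps', fps=0.5).filter('scale', 512, -1)
stream.output('tmp/example/out%d.jpg').run()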

Pipfile.lock (generated)

@@ -1,7 +1,7 @@
 {
     "_meta": {
         "hash": {
-            "sha256": "f8d5bda536d37e5fca14f05a2b6102b002ee5846e84298175ba319d20bd41d60"
+            "sha256": "a5308ff7514ddcff08e39ae06fe8f96e63ea3eecfbf4c106c907c8aa6b76b8dd"
         },
         "pipfile-spec": 6,
         "requires": {
@@ -26,19 +26,19 @@
         },
         "boto3": {
             "hashes": [
-                "sha256:65514427f5f849245c9a272fa06a5a014ae3945333f4f407489d034fb99dc61f",
-                "sha256:af87efaa772f95de67f72ed91aed2feef63593b5290696f669799202bc484b99"
+                "sha256:d39c04b51e60197f5503f8489f043bc904981567cc8431d389367767dc3fd5ae",
+                "sha256:fe1898c5b10035528207995c9931b78f2f50bb70cf93bac353152aea47c04780"
             ],
             "index": "pypi",
-            "version": "==1.17.4"
+            "version": "==1.17.30"
         },
         "botocore": {
             "hashes": [
-                "sha256:61657a1e4b3cdda9627084184bdf9dca4637c1523daead31a36974be0d51686d",
-                "sha256:96f9e0920ac91b6caae3039e5de09b80648ad57b4a97fc7d81a369afae34fb10"
+                "sha256:63951595a736dfc9759f57e33bec6eaea4f09c4800626ef5309437060b263e48",
+                "sha256:98ff1eb210d394a1ffe736b33c8a7be68f30f0a03550b559c5bb6fdf0c29328d"
             ],
             "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
-            "version": "==1.20.4"
+            "version": "==1.20.30"
         },
         "cachetools": {
             "hashes": [
@@ -63,29 +63,44 @@
             "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
             "version": "==4.0.0"
         },
+        "ffmpeg-python": {
+            "hashes": [
+                "sha256:65225db34627c578ef0e11c8b1eb528bb35e024752f6f10b78c011f6f64c4127",
+                "sha256:ac441a0404e053f8b6a1113a77c0f452f1cfc62f6344a769475ffdc0f56c23c5"
+            ],
+            "index": "pypi",
+            "version": "==0.2.0"
+        },
+        "future": {
+            "hashes": [
+                "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"
+            ],
+            "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+            "version": "==0.18.2"
+        },
         "google-auth": {
             "hashes": [
-                "sha256:008e23ed080674f69f9d2d7d80db4c2591b9bb307d136cea7b3bc129771d211d",
-                "sha256:514e39f4190ca972200ba33876da5a8857c5665f2b4ccc36c8b8ee21228aae80"
+                "sha256:9bd436d19ab047001a1340720d2b629eb96dd503258c524921ec2af3ee88a80e",
+                "sha256:dcaba3aa9d4e0e96fd945bf25a86b6f878fcb05770b67adbeb50a63ca4d28a5e"
             ],
             "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
-            "version": "==1.25.0"
+            "version": "==1.28.0"
         },
         "google-auth-oauthlib": {
             "hashes": [
-                "sha256:65b65bc39ad8cab15039b35e5898455d3d66296d0584d96fe0e79d67d04c51d9",
-                "sha256:d4d98c831ea21d574699978827490a41b94f05d565c617fe1b420e88f1fc8d8d"
+                "sha256:54431535309cfab50897d9c181e8c2226268825aa6e42e930b05b99c5041a18c",
+                "sha256:dabffbf594a6be2fd6d054060846d1201569252efb10dfb749b504a7591f8af0"
             ],
             "markers": "python_version >= '3.6'",
-            "version": "==0.4.2"
+            "version": "==0.4.3"
         },
         "gspread": {
             "hashes": [
-                "sha256:273da28275eb8dc664b1ca944e59255949d75ac3cac62d65797003dbb419a2cd",
-                "sha256:e04f1a6267b3929fc1600424c5ec83906d439672cafdd61a9d5b916a139f841c"
+                "sha256:056ceb9fb4f439c15ec39d84c91653c6435f775a1c8afc8fe7f909f8393821fb",
+                "sha256:4bda4ab8c5edb9e41cf4ae40d4d5fb30447522b4e43608e05c01351ab1b96912"
             ],
             "index": "pypi",
-            "version": "==3.6.0"
+            "version": "==3.7.0"
         },
         "idna": {
             "hashes": [
@@ -181,11 +196,11 @@
         },
         "rsa": {
             "hashes": [
-                "sha256:69805d6b69f56eb05b62daea3a7dbd7aa44324ad1306445e05da8060232d00f4",
-                "sha256:a8774e55b59fd9fc893b0d05e9bfc6f47081f46ff5b46f39ccf24631b7be356b"
+                "sha256:78f9a9bf4e7be0c5ded4583326e7461e3a3c5aae24073648b4bdfa797d78c9d2",
+                "sha256:9d689e6ca1b3038bc82bf8d23e944b6b6037bc02301a574935b2dd946e0353b9"
             ],
             "markers": "python_version >= '3.6'",
-            "version": "==4.7"
+            "version": "==4.7.2"
         },
         "s3transfer": {
             "hashes": [
@@ -204,19 +219,19 @@
         },
         "urllib3": {
             "hashes": [
-                "sha256:1b465e494e3e0d8939b50680403e3aedaa2bc434b7d5af64dfd3c958d7f5ae80",
-                "sha256:de3eedaad74a2683334e282005cd8d7f22f4d55fa690a2a1020a416cb0a47e73"
+                "sha256:2f4da4594db7e1e110a944bb1b551fdf4e6c136ad42e4234131391e21eb5b0df",
+                "sha256:e7b021f7241115872f92f43c6508082facffbd1c048e3c6e2bb9c2a157e28937"
             ],
             "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'",
-            "version": "==1.26.3"
+            "version": "==1.26.4"
         },
         "youtube-dl": {
             "hashes": [
-                "sha256:831a29b2d34493ef8181ff288f403135bb4b00df1cd201eb8cbe80b5b5425760",
-                "sha256:b337f20563094decc6b3c16e6fcad14ec3df9d99519344d6e95e40878b2c8075"
+                "sha256:c287ad8dd33471aabaabab5ab1dd825bebc70eb8b83ebfa93fd71022e01a1d08",
+                "sha256:d414166efe52447877db06803816277f52f405faeee2bdf5ef816b30e352b3b8"
             ],
             "index": "pypi",
-            "version": "==2021.2.4.1"
+            "version": "==2021.3.14"
         }
     },
     "develop": {}

View file

@@ -9,10 +9,10 @@ from dotenv import load_dotenv
 from botocore.errorfactory import ClientError
 import argparse
 import math
+import ffmpeg

 load_dotenv()


 def col_to_index(col):
     col = list(col)
     ndigits = len(col)
@@ -27,10 +27,9 @@ def col_to_index(col):
     return v - 1


 def index_to_col(index):
     alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

     if index > 25:
         t = index
         dig = 0
@@ -41,10 +40,61 @@ def index_to_col(index):
     else:
         return alphabet[index]


+def get_thumbnails(filename, s3_client):
+    if not os.path.exists(filename.split('.')[0]):
+        os.mkdir(filename.split('.')[0])
+
+    stream = ffmpeg.input(filename)
+    stream = ffmpeg.filter(stream, 'fps', fps=0.5).filter('scale', 512, -1)
+    stream.output(filename.split('.')[0] + '/out%d.jpg').run()
+
+    thumbnails = os.listdir(filename.split('.')[0] + '/')
+    cdn_urls = []
+
+    for fname in thumbnails:
+        thumbnail_filename = filename.split('.')[0] + '/' + fname
+        key = filename.split('/')[1].split('.')[0] + '/' + fname
+
+        cdn_url = 'https://{}.{}.cdn.digitaloceanspaces.com/{}'.format(
+            os.getenv('DO_BUCKET'), os.getenv('DO_SPACES_REGION'), key)
+
+        with open(thumbnail_filename, 'rb') as f:
+            s3_client.upload_fileobj(f, Bucket=os.getenv(
+                'DO_BUCKET'), Key=key, ExtraArgs={'ACL': 'public-read'})
+
+        cdn_urls.append(cdn_url)
+        os.remove(thumbnail_filename)
+
+    key_thumb = cdn_urls[int(len(cdn_urls)*0.25)]
+
+    index_page = f'''<html><head><title>{filename}</title></head>
+        <body>'''
+
+    for t in cdn_urls:
+        index_page += f'<img src="{t}" />'
+    index_page += f"</body></html>"
+
+    index_fname = filename.split('.')[0] + '/index.html'
+
+    with open(index_fname, 'w') as f:
+        f.write(index_page)
+
+    thumb_index = filename.split('/')[1].split('.')[0] + '/index.html'
+
+    s3_client.upload_fileobj(open(index_fname, 'rb'), Bucket=os.getenv(
+        'DO_BUCKET'), Key=thumb_index, ExtraArgs={'ACL': 'public-read', 'ContentType': 'text/html'})
+
+    thumb_index_cdn_url = 'https://{}.{}.cdn.digitaloceanspaces.com/{}'.format(
+        os.getenv('DO_BUCKET'), os.getenv('DO_SPACES_REGION'), thumb_index)
+
+    return (key_thumb, thumb_index_cdn_url)


 def download_vid(url, s3_client, check_if_exists=False):
     ydl_opts = {'outtmpl': 'tmp/%(id)s.%(ext)s', 'quiet': False}
     ydl = youtube_dl.YoutubeDL(ydl_opts)
+    cdn_url = None
+    status = 'success'

     if check_if_exists:
         info = ydl.extract_info(url, download=False)
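Both get_thumbnails and the existence check below assume s3_client is a boto3 client aimed at DigitalOcean Spaces' S3-compatible endpoint; the commit itself only shows DO_BUCKET and DO_SPACES_REGION. A minimal sketch of constructing such a client, with hypothetical DO_SPACES_KEY / DO_SPACES_SECRET names for the credentials:

import os
import boto3

# S3-compatible client for DigitalOcean Spaces; the endpoint is derived
# from the region (e.g. https://fra1.digitaloceanspaces.com).
s3_client = boto3.client(
    's3',
    region_name=os.getenv('DO_SPACES_REGION'),
    endpoint_url='https://{}.digitaloceanspaces.com'.format(os.getenv('DO_SPACES_REGION')),
    aws_access_key_id=os.getenv('DO_SPACES_KEY'),         # hypothetical name
    aws_secret_access_key=os.getenv('DO_SPACES_SECRET'))  # hypothetical name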
@@ -59,14 +109,15 @@ def download_vid(url, s3_client, check_if_exists=False):
         filename = ydl.prepare_filename(info)
         key = filename.split('/')[1]

-        cdn_url = 'https://{}.{}.cdn.digitaloceanspaces.com/{}'.format(
-            os.getenv('DO_BUCKET'), os.getenv('DO_SPACES_REGION'), key)

         try:
             s3_client.head_object(Bucket=os.getenv('DO_BUCKET'), Key=key)

             # file exists
-            return (cdn_url, 'already archived')
+            cdn_url = 'https://{}.{}.cdn.digitaloceanspaces.com/{}'.format(
+                os.getenv('DO_BUCKET'), os.getenv('DO_SPACES_REGION'), key)
+            status = 'already archived'
         except ClientError:
             pass
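The rewrite above turns the early return into status bookkeeping: when head_object succeeds, the video is marked 'already archived' but execution continues so thumbnails can still be generated. The check works because head_object raises ClientError when the key is missing, and HEAD transfers no object body either way. The same pattern as a standalone sketch:

from botocore.errorfactory import ClientError

def object_exists(s3_client, bucket, key):
    # HEAD returns metadata only; a missing key surfaces as ClientError (404).
    try:
        s3_client.head_object(Bucket=bucket, Key=key)
        return True
    except ClientError:
        return False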
@@ -86,41 +137,82 @@ def download_vid(url, s3_client, check_if_exists=False):
     if not os.path.exists(filename):
         filename = filename.split('.')[0] + '.mkv'

-    key = filename.split('/')[1]
-    cdn_url = 'https://{}.{}.cdn.digitaloceanspaces.com/{}'.format(
-        os.getenv('DO_BUCKET'), os.getenv('DO_SPACES_REGION'), key)
+    if status != 'already archived':
+        key = filename.split('/')[1]
+        cdn_url = 'https://{}.{}.cdn.digitaloceanspaces.com/{}'.format(
+            os.getenv('DO_BUCKET'), os.getenv('DO_SPACES_REGION'), key)

-    with open(filename, 'rb') as f:
-        s3_client.upload_fileobj(f, Bucket=os.getenv(
-            'DO_BUCKET'), Key=key, ExtraArgs={'ACL': 'public-read'})
+        with open(filename, 'rb') as f:
+            s3_client.upload_fileobj(f, Bucket=os.getenv(
+                'DO_BUCKET'), Key=key, ExtraArgs={'ACL': 'public-read'})

+    key_thumb, thumb_index = get_thumbnails(filename, s3_client)
     os.remove(filename)

-    return (cdn_url, 'success')
+    video_data = {
+        'cdn_url': cdn_url,
+        'thumbnail': key_thumb,
+        'thumbnail_index': thumb_index,
+        'duration': info['duration'] if 'duration' in info else None,
+        'title': info['title'] if 'title' in info else None,
+        'timestamp': info['timestamp'] if 'timestamp' in info else datetime.datetime.strptime(info['upload_date'], '%Y%m%d').timestamp() if 'upload_date' in info else None,
+    }
+
+    return (video_data, status)


-def update_sheet(wks, row, status, url, columns):
+def update_sheet(wks, row, status, video_data, columns, v):
     update = []

-    if url is not None and columns['archive'] is not None:
-        update += [{
-            'range': columns['archive'] + str(row),
-            'values': [[url]]
-        }]

     if columns['status'] is not None:
         update += [{
             'range': columns['status'] + str(row),
             'values': [[status]]
         }]

-    if columns['date'] is not None:
+    if 'cdn_url' in video_data and video_data['cdn_url'] is not None and columns['archive'] is not None and v[col_to_index(columns['archive'])] == '':
+        update += [{
+            'range': columns['archive'] + str(row),
+            'values': [[video_data['cdn_url']]]
+        }]
+
+    if 'date' in video_data and columns['date'] is not None and v[col_to_index(columns['date'])] == '':
         update += [{
             'range': columns['date'] + str(row),
             'values': [[datetime.datetime.now().isoformat()]]
         }]

-    wks.batch_update(update)
+    if 'thumbnail' in video_data and columns['thumbnail'] is not None and v[col_to_index(columns['thumbnail'])] == '':
+        update += [{
+            'range': columns['thumbnail'] + str(row),
+            'values': [['=IMAGE("' + video_data['thumbnail'] + '")']]
+        }]
+
+    if 'thumbnail_index' in video_data and columns['thumbnail_index'] is not None and v[col_to_index(columns['thumbnail_index'])] == '':
+        update += [{
+            'range': columns['thumbnail_index'] + str(row),
+            'values': [[video_data['thumbnail_index']]]
+        }]
+
+    if 'timestamp' in video_data and columns['timestamp'] is not None and video_data['timestamp'] is not None and v[col_to_index(columns['timestamp'])] == '':
+        update += [{
+            'range': columns['timestamp'] + str(row),
+            'values': [[datetime.datetime.fromtimestamp(video_data['timestamp']).isoformat()]]
+        }]
+
+    if 'title' in video_data and columns['title'] is not None and video_data['title'] is not None and v[col_to_index(columns['title'])] == '':
+        update += [{
+            'range': columns['title'] + str(row),
+            'values': [[video_data['title']]]
+        }]
+
+    if 'duration' in video_data and columns['duration'] is not None and video_data['duration'] is not None and v[col_to_index(columns['duration'])] == '':
+        update += [{
+            'range': columns['duration'] + str(row),
+            'values': [[str(video_data['duration'])]]
+        }]
+
+    wks.batch_update(update, value_input_option='USER_ENTERED')


 def main():
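All cell writes are now accumulated into one list and flushed with a single wks.batch_update call. The new value_input_option='USER_ENTERED' makes Sheets interpret values as if a user typed them, so the =IMAGE(...) string is parsed as a live formula instead of being stored as literal text. The payload shape, sketched with hypothetical ranges and values:

# wks is a gspread Worksheet; ranges and values here are hypothetical.
update = [
    {'range': 'D3', 'values': [['success']]},
    {'range': 'F3', 'values': [['=IMAGE("https://example.com/out1.jpg")']]},
]
wks.batch_update(update, value_input_option='USER_ENTERED')  # one API call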
@@ -164,6 +256,16 @@ def main():
         columns['date'] = index_to_col(headers.index(
             'Archive date')) if 'Archive date' in headers else None
         columns['status'] = index_to_col(headers.index(
             'Archive status')) if 'Archive status' in headers else None
+        columns['thumbnail'] = index_to_col(headers.index(
+            'Thumbnail')) if 'Thumbnail' in headers else None
+        columns['thumbnail_index'] = index_to_col(headers.index(
+            'Thumbnail index')) if 'Thumbnail index' in headers else None
+        columns['timestamp'] = index_to_col(headers.index(
+            'Upload timestamp')) if 'Upload timestamp' in headers else None
+        columns['title'] = index_to_col(headers.index(
+            'Upload title')) if 'Upload title' in headers else None
+        columns['duration'] = index_to_col(headers.index(
+            'Duration')) if 'Duration' in headers else None

         if columns['url'] is None:
             print("No 'Media URL' column found, skipping")
@@ -182,18 +284,18 @@ def main():
                if args.streaming and 'is_live' in info and info['is_live']:
                    wks.update(columns['status'] + str(i), 'Recording stream')
-                   cdn_url, status = download_vid(v[url_index], s3_client)
-                   update_sheet(wks, i, status, cdn_url, columns)
+                   video_data, status = download_vid(v[url_index], s3_client)
+                   update_sheet(wks, i, status, video_data, columns, v)
                    sys.exit()
                elif not args.streaming and ('is_live' not in info or not info['is_live']):
-                   cdn_url, status = download_vid(
+                   video_data, status = download_vid(
                        v[url_index], s3_client, check_if_exists=True)
-                   update_sheet(wks, i, status, cdn_url, columns)
+                   update_sheet(wks, i, status, video_data, columns, v)
            except:
                # if any unexpected errors occurred, log these into the Google Sheet
                t, value, traceback = sys.exc_info()
-               update_sheet(wks, i, str(value), None)
+               update_sheet(wks, i, str(value), {}, columns, v)


 if __name__ == "__main__":
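A closing note on the metadata half of the commit: download_vid prefers youtube-dl's exact 'timestamp' field and falls back to parsing 'upload_date' (YYYYMMDD), and update_sheet renders the result with fromtimestamp(...).isoformat(). The fallback chain in isolation, with a hypothetical info dict:

import datetime

info = {'upload_date': '20210318'}  # hypothetical youtube-dl info dict, no 'timestamp'
ts = info['timestamp'] if 'timestamp' in info else datetime.datetime.strptime(
    info['upload_date'], '%Y%m%d').timestamp() if 'upload_date' in info else None
print(datetime.datetime.fromtimestamp(ts).isoformat())  # 2021-03-18T00:00:00 (local midnight)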