#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import subprocess
import logging


def get_infos(file):
    '''
    Extract the file's stream information with ffprobe and store it in a
    dictionary for later use.
    -> http://ffmpeg.org/ffprobe.html
    '''
v_infos = {
'index': None,
'height': None,
'width': None,
'color_primaries': None,
'color_space': None,
'color_transfer': None,
'pix_fmt': None,
'display_aspect_ratio': None,
}
a_infos = {}
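    # Probe the video stream; '-read_intervals "%+#1"' limits -show_frames to the
    # first frame, which is enough to pick up HDR side data (mastering display
    # metadata and content light level) without decoding the whole file.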
v_infos_cmd = f'ffprobe -v quiet -print_format json -show_format -show_streams -show_frames -read_intervals "%+#1" -select_streams v {file}'
v_infos_raw = subprocess.getoutput(v_infos_cmd)
a_infos_cmd = f"ffprobe -v quiet -print_format json -show_format -show_streams -select_streams a {file}"
a_infos_raw = subprocess.getoutput(a_infos_cmd)
full_v_infos = json.loads(v_infos_raw)
full_a_infos = json.loads(a_infos_raw)
v_stream = full_v_infos['streams'][0]
for prop in v_infos.keys():
try:
v_infos.update({prop: v_stream[prop]})
except KeyError:
pass
try:
v_infos.update({'side_data_list': full_v_infos['frames'][0]['side_data_list']})
except KeyError:
pass
a_infos = []
for a_stream in full_a_infos['streams']:
        a_stream_infos = {
            'index': a_stream['index'],
            'channels': a_stream['channels'],
            'channel_layout': a_stream['channel_layout'],
            # fall back to 'und' / '' when the stream has no tags
            'language': a_stream.get('tags', {}).get('language', 'und'),
            'title': a_stream.get('tags', {}).get('title', '')}
        a_infos.append(a_stream_infos)
duration = subprocess.getoutput(f"ffprobe -v quiet -print_format json -show_format {file}")
duration = json.loads(duration)
duration = float(duration['format']['duration'])
    # Detect HDR10+ dynamic metadata by piping the raw HEVC bitstream through
    # hdr10plus_parser; if found, dump it to a JSON file for the encoding pass.
    hdr10_v_cmd = f'ffmpeg -loglevel panic -i {file} -c:v copy -vbsf hevc_mp4toannexb -f hevc - | ./hdr10plus_parser -o metadata.json --verify -'
    hdr10_v_raw = subprocess.getoutput(hdr10_v_cmd)
    logging.debug(hdr10_v_raw)
    if 'metadata detected' in hdr10_v_raw:
        hdr10_cmd = f'ffmpeg -loglevel panic -i {file} -c:v copy -vbsf hevc_mp4toannexb -f hevc - | ./hdr10plus_parser -o /tmp/{file}_hdr10_metadata.json -'
        subprocess.getoutput(hdr10_cmd)
        v_infos.update({'hdr10': True, 'hdr10_metadata': f'/tmp/{file}_hdr10_metadata.json'})
infos = {'duration': duration, 'video': v_infos, 'audio': a_infos}
logging.debug("Informations du film : \n" + json.dumps(infos, indent=True))
return infos


def is_interlaced(file, infos):
    '''
    Detect whether the video is interlaced.
    -> https://fr.wikipedia.org/wiki/Entrelacement_(vid%C3%A9o)
    '''
    duration_tier = int(infos['duration'] / 3)
    # Run the idet filter on the middle third of the file and parse the
    # "Multi frame detection" summary that ffmpeg prints at the end.
    command = f"ffmpeg -loglevel info -ss {duration_tier} -t {duration_tier} -i {file} -an -filter:v idet -f null -y /dev/null"
    result = subprocess.getoutput(command)
    pct = 0
    for line in result.splitlines():
        if "Multi" in line:
            TFF = int(line.split('TFF:')[1].split()[0])
            BFF = int(line.split('BFF:')[1].split()[0])
            Progressive = int(line.split('Progressive:')[1].split()[0])
            try:
                pct = round(((TFF + BFF) / (TFF + BFF + Progressive)) * 100)
            except ZeroDivisionError:
                pct = 100
    if pct > 10:
        logging.debug(f"Video is {pct}% interlaced")
        return True
    else:
        logging.debug("Video is not interlaced")
        return False


def cropping(file, infos):
    '''
    Detect the unused black bars of the video (crop parameters).
    '''
duration_tier = int(infos['duration'] / 3)
command = f"ffmpeg -loglevel info -i {file} -ss {duration_tier} -t {duration_tier} -an -f null -vf cropdetect -y /dev/null"
cropsize = subprocess.getoutput(command).splitlines()[-3].split()[-1]
logging.debug(f"Paramètre de découpe : {cropsize}")
return cropsize


def volume_audio(file, infos):
    '''
    Compute the volume adjustment that brings each audio track's peak to 0 dB.
    '''
volumes = {}
for piste_audio in infos['audio']:
piste = piste_audio['index']
command = f"ffmpeg -loglevel info -i {file} -map 0:{piste} -af volumedetect -f null -y /dev/null"
volumedetect = subprocess.getoutput(command)
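        # volumedetect reports e.g. "max_volume: -5.1 dB"; negating it gives the
        # gain that brings the loudest peak up to 0 dB.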
for line in volumedetect.splitlines():
if "max_volume" in line:
                volume = line.split()[-2]
                volume = f"{-float(volume)}dB"
                logging.debug(f"Volume adjustment for track {piste}: {volume}")
volumes.update({piste: volume})
return volumes


def stabilization(file):
    '''
    Image stabilisation analysis pass (e.g. for smartphone footage): detect
    camera shake and write the transforms to /tmp/vidstab.trf.
    '''
cmd_stab = f'ffmpeg -i {file} -vf vidstabdetect=shakiness=10:accuracy=10:result="/tmp/vidstab.trf" -f null - '
subprocess.getoutput(cmd_stab)
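

# The vidstabdetect pass above only analyses the shake and writes /tmp/vidstab.trf;
# applying the correction needs a second pass with ffmpeg's vidstabtransform filter.
# Minimal sketch of that pass (not called from __main__; the function name, output
# name and smoothing value are illustrative, not part of the original script):
def apply_stabilization(file):
    '''
    Apply the transforms computed by stabilization() (second vid.stab pass).
    '''
    cmd = (f'ffmpeg -i {file} '
           f'-vf vidstabtransform=input="/tmp/vidstab.trf":smoothing=30,unsharp=5:5:0.8:3:3:0.4 '
           f'-y {file}_stabilized.mkv')
    subprocess.getoutput(cmd)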


def convert_audio(file, track, volume_adj, channels, channel_layout, language, title):
    '''
    Re-encode one audio track to Opus with the measured volume adjustment
    and the original language/title tags.
    '''
    # 64 kb/s per channel is a comfortable target bitrate for Opus.
    bitrate = f'{64*channels}k'
    codec = 'libopus'
    metadatas = f'-metadata language="{language}" -metadata title="{title}"'
    command = f'ffmpeg -loglevel error -i {file} -map 0:{track} -vn -sn -c:a {codec} -b:a {bitrate} -mapping_family 1 {metadatas} -filter:a volume={volume_adj},aformat=channel_layouts={channel_layout} -y {file}_audio_{track}.mka'
logging.debug(command)
result = subprocess.getoutput(command)
logging.info(result)


def convert_video(file, infos, start, crop, crf):
    '''
    Encode one 300-second chunk of the video with x265, carrying over the HDR
    metadata when the source has it.
    '''
    output = f'{file}_video_t{start}.mkv'
    fmt = infos['video']['pix_fmt']
    track = infos['video']['index']
    codec = 'libx265 -preset slower'
    if 'side_data_list' in infos['video'].keys():
        # Rebuild the x265 master-display string from the probed side data
        # (side_data_list[0]: mastering display metadata, side_data_list[1]:
        # content light level). Chromaticities are expressed in units of
        # 0.00002 and luminance in units of 0.0001 cd/m2, in the order
        # G()B()R()WP()L(max,min) expected by x265.
        def scaled(fraction, unit):
            num, den = fraction.split('/')
            return round(int(num) / int(den) * unit)
        mastering = infos['video']['side_data_list'][0]
        light_level = f"{infos['video']['side_data_list'][1]['max_content']},{infos['video']['side_data_list'][1]['max_average']}"
        color_primaries = infos['video']['color_primaries']
        color_transfer = infos['video']['color_transfer']
        color_space = infos['video']['color_space']
        green = f"G({scaled(mastering['green_x'], 50000)},{scaled(mastering['green_y'], 50000)})"
        blue = f"B({scaled(mastering['blue_x'], 50000)},{scaled(mastering['blue_y'], 50000)})"
        red = f"R({scaled(mastering['red_x'], 50000)},{scaled(mastering['red_y'], 50000)})"
        white_point = f"WP({scaled(mastering['white_point_x'], 50000)},{scaled(mastering['white_point_y'], 50000)})"
        luminance = f"L({scaled(mastering['max_luminance'], 10000)},{scaled(mastering['min_luminance'], 10000)})"
        master_display = green + blue + red + white_point + luminance
        hdr = f'-x265-params hdr-opt=1:repeat-headers=1:colorprim={color_primaries}:transfer={color_transfer}:colormatrix={color_space}:master-display={master_display}:max-cll={light_level}'
else:
hdr = ""
command = f'ffmpeg -loglevel error -i {file} -map 0:{track} -ss {start} -t 300 -an -sn -c:v {codec} {hdr} -crf {crf} -pix_fmt {fmt} -filter:v {crop} -y {output}'
logging.debug(command)
result = subprocess.getoutput(command)
logging.info(result)


if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("f_input")
2021-04-15 16:20:39 +02:00
parser.add_argument("-d", "--debug", dest="debug", action="store_true")
parser.add_argument("-s", "--stabilise", dest="stab", action="store_true")
args = parser.parse_args()
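    # Example invocation:
    #   ./vid_convert.py movie.mkv --debug --stabilise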
    if args.debug:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format='[%(asctime)s]\n%(message)s', level=log_level, datefmt='%d/%m/%Y %H:%M:%S')
file = args.f_input
infos = get_infos(file)
interlaced = is_interlaced(file, infos)
cropsize = cropping(file, infos)
volumes = volume_audio(file, infos)
if args.stab:
stabilization(file)
for track in infos['audio']:
convert_audio(file, track['index'], volumes[track['index']], track['channels'], track['channel_layout'], track['language'], track['title'])
vid_part_time = 0
crf = 20
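    # Encode the video in 300-second chunks; convert_video names each chunk
    # after its start offset.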
while vid_part_time < infos['duration']:
convert_video(file, infos, vid_part_time, cropsize, crf)
vid_part_time += 300