[autotest] docker execution

Artur 2025-10-10 21:08:12 +03:00
parent f6fd5e50c8
commit 7b92fb6073
5 changed files with 172 additions and 71 deletions

.gitignore (vendored)

@@ -1,2 +1,7 @@
 *.log
 container/Drivers/*
+__pycache__
+*.yuv
+*.mp4
+*.csv
+*log*txt

PyScripts/extra.py (new file)

@@ -0,0 +1,17 @@
+from functools import wraps
+def log_args_decorator(func):
+    """
+    A decorator that logs the arguments passed to a function.
+    """
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        arg_names = func.__code__.co_varnames[:func.__code__.co_argcount]
+        pos_args = dict(zip(arg_names, args))
+        all_args = {**pos_args, **kwargs}
+        print(f"Calling function '{func.__name__}' with arguments: {all_args}")
+        result = func(*args, **kwargs)
+        print(f"Function '{func.__name__}' returned: {result}")
+        return result
+    return wrapper
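A minimal usage sketch (not part of the commit; the decorated function below is made up) showing what the decorator prints around a call:

# Illustrative only: decorate any function to log its arguments and return value.
from extra import log_args_decorator

@log_args_decorator
def add(a, b):
    return a + b

add(2, 3)
# Calling function 'add' with arguments: {'a': 2, 'b': 3}
# Function 'add' returned: 5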

PyScripts autotest script (file name not shown in this view)

@@ -2,17 +2,19 @@
 from itertools import product
 import qa
 from latencyParse import getLatencyTable
+import os, stat, subprocess
+import pandas as pd
+from extra import log_args_decorator
 options = {
     "x264enc": {
         "bitrate": ["10000", "20000", "5000"],
         "speed-preset": ["ultrafast", "fast", "medium"],
         "tune": ["zerolatency"],
-        "slices-threads": ["true", "false"],
+        "sliced-threads": ["true", "false"],
         "b-adapt": ["true", "false"],
         "rc-lookahead": ["40", "0"],
         "ref": ["3", "0"]
     },
     "nvh264enc": {
         "bitrate": ["10000", "20000", "5000"],
@@ -20,43 +22,70 @@ options = {
         "rc-lookahead": ["0"],
         "rc-mode": ["2", "0", "5"],
         "zerolatency": ["true", "false"],
+    },
+    "nvv4l2h264enc": {
+        "bitrate": ["10000000", "20000000", "5000000"],
+        "profile": ["0", "1", "2"],
+        "preset-id": ["1", "2", "3"],
+        "control-rate": ["1", "2"],
+        "tuning-info-id": ["4", "2", "3"]
     }
-# ,
-# "nvv4l2h264enc": {
-#     "bitrate": ["10000000", "20000000", "5000000"],
-#     "profile": ["0", "1", "2"],
-#     "preset-id": ["1", "2", "3"],
-#     "control-id": ["1", "2"],
-#     "tuning-info-id": ["4", "2"]
-# }
 }
-videos = [""]
+videos = {
+    "base-daVinci": "./base-daVinci-stereo-left-10.yuv"
+}
 testsource = "videotestsrc pattern=smpte"
-videosrc = ["filesrc location=", "! qtdemux ! h264parse ! avdec_h264"]
+videosrc = {
+    "raw":["filesrc location=", " ! rawvideoparse "],
+    "h264": ["filesrc location=", " ! decodebin"]
+}
 psnr_check = {
     "x264enc": "-pixel_format yuv420p -color_range pc",
     "nvh264enc": "-pixel_format nv12 -color_range tv",
-    "nvv4l2h264enc": "-pixel_format yuv420p -color_range tv"
+    "nvv4l2h264enc": "-pixel_format nv12 -color_range tv"
 }
+with_docker = [ "nvv4l2h264enc" ]
 formats = {
     "x264enc": "I420",
     "nvh264enc": "NV12",
-    "nvv4l2h264enc": "I420"
+    "nvv4l2h264enc": "NV12"
 }
 profiles = ["baseline", "main"]
+encoder_prefix = {
+    "nvv4l2h264enc": " nvvideoconvert !",
+    "nvh264enc": "",
+    "x264enc": ""
+}
 video_info = {
-    "video1":"-video_size 1920x1080 -framerate 23.98"
+    "video1":"-video_size 1920x1080 -framerate 23.98",
+    "sample-surgery":"-video_size 1280x720 -framerate 29.97",
+    "base-daVinci": "-video_size 1280x720 -framerate 59.94"
+}
+gst_video_info = {
+    "video1":"format=I420,height=1080,width=1920,framerate=24000/1001",
+    "base-daVinci": "format=2 height=720 width=1280 colorimetry=bt601 framerate=60000/1001"
 }
 latency_filename = "latency-traces-autotest.log"
+# Step-by-step:
+# 1. Generate all combinations for each encoder
+# 2. For each combination, create a GStreamer pipeline string
+# 3. Start each pipeline with latency tracing enabled
+# 3.1 Monitor CPU, GPU and memory usage during each pipeline run (nah, later, maybe)
+# 4. Start latency parsing script after each pipeline and store results in a pandas dataframe:
+#    - two key columns: encoder name, parameters string
+# 5. Run PSNR check after each pipeline and add results in the dataframe
+# 6. Save dataframe to CSV file
 class Pipeline:
     def __init__(self):
         self.pipeline = "gst-launch-1.0 -e "
@@ -72,33 +101,34 @@ class Pipeline:
         return self
     def add_source(self, source):
-        self.pipeline += source + " ! videoconvert ! "
+        self.pipeline += source + " ! clocksync sync-to-first=true ! videoconvert ! "
         return self
     def __add_tee(self, encoder):
         self.pipeline += "capsfilter caps=video/x-raw,format=" + formats[encoder] + " ! "
-        self.pipeline += "tee name=t t. ! queue ! filesink location=\"base-autotest.yuv\" "
+        self.pipeline += "tee name=t t. ! queue max-size-time=5000000000 max-size-bytes=100485760 max-size-buffers=1000 ! filesink location=\"base-autotest.yuv\" "
     def add_encoder(self, encoder, params):
         self.__add_tee(encoder)
         self.options += " ".join(params) + " "
-        self.pipeline += "t. ! queue ! "
+        self.pipeline += "t. ! queue max-size-time=5000000000 max-size-bytes=100485760 max-size-buffers=1000 ! "
+        self.pipeline += encoder_prefix[encoder]
         self.pipeline += encoder + " "
         self.pipeline += " ".join(params) + " "
         return self
     def add_profile(self, profile):
-        self.pipeline += "capsfilter caps=\"video/x-h264,profile=" + profile + "\" ! "
+        self.pipeline += "! capsfilter caps=\"video/x-h264,profile=" + profile + "\" ! "
         self.options += "profile=" + profile + " "
         return self
     def to_file(self, filename):
-        self.pipeline += "h264parse ! mp4mux ! filesink location=\"" + filename + "\""
+        self.pipeline += "h264parse ! mpegtsmux ! filesink location=\"" + filename + "\""
         return self
-def makeVideoSrc(idx):
-    return videosrc[0] + videos[idx] + videosrc[1]
+def makeVideoSrc(videoName):
+    return videosrc["raw"][0] + videos[videoName] + videosrc["raw"][1] + gst_video_info[videoName]
 def generateEncoderStrings():
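Putting the builder methods together: for one illustrative x264enc combination on the base-daVinci clip, the command string the Pipeline class assembles would look roughly like the line below (the contribution of add_tracing() is not shown in this diff, and the parameter values are just one sample from the options table):

gst-launch-1.0 -e filesrc location=./base-daVinci-stereo-left-10.yuv ! rawvideoparse format=2 height=720 width=1280 colorimetry=bt601 framerate=60000/1001 ! clocksync sync-to-first=true ! videoconvert ! capsfilter caps=video/x-raw,format=I420 ! tee name=t t. ! queue max-size-time=5000000000 max-size-bytes=100485760 max-size-buffers=1000 ! filesink location="base-autotest.yuv" t. ! queue max-size-time=5000000000 max-size-bytes=100485760 max-size-buffers=1000 ! x264enc bitrate=10000 speed-preset=ultrafast tune=zerolatency ! capsfilter caps="video/x-h264,profile=baseline" ! h264parse ! mpegtsmux ! filesink location="autotest-x264enc-baseline-test-base-daVinci.mp4"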
@@ -133,45 +163,85 @@ def generate_combinations(config_dict):
     return combinations
-# Step-by-step:
-# 1. Generate all combinations for each encoder
-# 2. For each combination, create a GStreamer pipeline string
-# 3. Start each pipeline with latency tracing enabled
-# 3.1 Monitor CPU, GPU and memory usage during each pipeline run (nah, later, maybe)
-# 4. Start latency parsing script after each pipeline and store results in a pandas dataframe:
-#    - two key columns: encoder name, parameters string
-# 5. Run PSNR check after each pipeline and add results in the dataframe
-# 6. Save dataframe to CSV file
-import pandas as pd
 qualityDataframe = pd.DataFrame()
 latencyDataframe = pd.DataFrame()
+dockerRunString = "sudo -S docker container exec deepstream-gst bash"
-def run_pipeline(pipeline):
-    import subprocess
-    print("Running pipeline:")
-    print(pipeline)
-    with open("pipeline-log.txt", "w") as f:
-        proc = subprocess.run(pipeline, shell=True, stdout=f, stderr=subprocess.STDOUT, text=True)
-    print(f"Pipeline finished with return code: {proc.returncode}")
-    with open("pipeline-log.txt", "r") as f:
+def execPermissions(scriptFile = "to_exec.sh"):
+    current_permissions = os.stat(scriptFile).st_mode
+    new_permissions = current_permissions | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+    os.chmod(scriptFile, new_permissions)
+def writeToExecFile(contents, file):
+    with open(file, "w") as f:
+        f.write(str(contents))
+    execPermissions(file)
+def is_docker(func):
+    def wrapper(pipeline):
+        script_name = "to_exec.sh"
+        for encoder in with_docker:
+            if encoder in pipeline:
+                writeToExecFile(pipeline, script_name)
+                pipeline = dockerRunString + f" {script_name}"
+        func(pipeline)
+    return wrapper
+def is_sudo(pipeline):
+    if pipeline.startswith("sudo"):
+        return True
+    return False
+def passwordAuth(proc):
+    password = os.getenv("UAUTH")
+    if password is not None:
+        proc.communicate(password)
+def printLog(file):
+    with open(file, "r") as f:
         out = f.read()
         print(out)
+@is_docker
+@log_args_decorator
+def run_pipeline(pipeline):
+    logfile = "pipeline-log.txt"
+    with open(logfile, "w") as f:
+        proc = subprocess.Popen(pipeline, shell=True,
+                                stdin=subprocess.PIPE, stdout=f,
+                                stderr=subprocess.STDOUT, text=True)
+        if is_sudo(pipeline):
+            passwordAuth(proc)
+        code = proc.wait()
+    printLog(logfile)
     if proc.returncode != 0:
         raise Exception("Pipeline failed, see log for details")
+def time_trace(func):
+    def wrapper():
+        import time
+        start_time = time.time()
+        func()
+        end_time = time.time()
+        elapsed_time = end_time - start_time
+        print(f"Total execution time: {elapsed_time} seconds")
+    return wrapper
+@time_trace
 def run_autotest():
+    global qualityDataframe, latencyDataframe
     encoders = generateEncoderStrings()
     for encoder, combinations in encoders.items():
+        qualityDataframe = pd.DataFrame()
+        latencyDataframe = pd.DataFrame()
         for params in combinations:
             for profile in profiles:
-                for idx in range(len(videos)):
-                    filename = "autotest-" + encoder + "-" + profile + "-test-" + str(idx) + ".mp4"
+                for videoName, videoPath in videos.items():
+                    filename = "autotest-" + encoder + "-" + profile + "-test-" + videoName + ".mp4"
                     pipeline = Pipeline()
                     pipeline = (
                         pipeline.add_tracing()
-                        .add_source(makeVideoSrc(idx))
+                        .add_source(makeVideoSrc(videoName))
                         .add_encoder(encoder, params.split(" "))
                         .add_profile(profile)
                         .to_file(filename)
@@ -183,18 +253,18 @@ def run_autotest():
                         print(f"Error occurred: {e}")
                         continue
                     psnr_metrics, ssim_metrics = qa.run_quality_check(
-                        videos[idx],
+                        videoPath,
                         filename,
-                        video_info[videos[idx]] + " " + psnr_check[encoder]
+                        video_info[videoName] + " " + psnr_check[encoder]
                     )
                     dfPsnr = qa.parse_quality_report(psnr_metrics, ssim_metrics)
                     print("-----")
                     dfLatency = getLatencyTable(latency_filename)
                     columnsQ = pd.MultiIndex.from_tuples(
-                        [(encoder, profile, params, col) for col in dfPsnr.columns]
+                        [(encoder, profile, videoName, params, col) for col in dfPsnr.columns]
                     )
                     columnsLatency = pd.MultiIndex.from_tuples(
-                        [(encoder, profile, params, col) for col in dfLatency.columns]
+                        [(encoder, profile, videoName, params, col) for col in dfLatency.columns]
                     )
                     dfPsnr.columns = columnsQ
                     dfLatency.columns = columnsLatency
@@ -204,13 +274,7 @@ def run_autotest():
                     print("Current results:")
                     print(dfPsnr)
                     print(dfLatency)
-def run_timetracer():
-    import time
-    start_time = time.time()
-    run_autotest()
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    print(f"Total execution time: {elapsed_time} seconds")
-run_timetracer()
+        qualityDataframe.to_csv(f"qualityResults{encoder}.csv")
+        latencyDataframe.to_csv(f"latencyDataframe{encoder}.csv")
+run_autotest()
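The docker execution path added here works as follows: for encoders listed in with_docker, the is_docker wrapper intercepts the command before run_pipeline executes it. A minimal sketch of the rewrite (the pipeline string below is made up; the container name, script name, and UAUTH convention come from the code above):

# Illustrative only: how is_docker rewrites a command for an encoder in with_docker.
cmd = "gst-launch-1.0 -e videotestsrc ! nvv4l2h264enc ! fakesink"  # hypothetical pipeline
# The original command is written to to_exec.sh and made executable
# (writeToExecFile + execPermissions), then replaced by a docker exec call:
cmd = "sudo -S docker container exec deepstream-gst bash to_exec.sh"
# Because the rewritten command starts with "sudo", run_pipeline feeds the
# UAUTH environment variable to the process's stdin via passwordAuth().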

PyScripts/latencyParse.py

@@ -4,16 +4,20 @@ import numpy as np
 # Idea is next:
 # on set of experiments we are calculating all latency information -> each element avg, std, max numbers, total is not calculated, because it requires
 # additional parsing for parallel branches (from tee)
 # Ideally we would write data to table
 idxCache = dict()
 def findWord(words, wordToSearch):
     global idxCache
     if wordToSearch in idxCache:
         for idx in idxCache[wordToSearch]:
-            if words[idx].startswith(wordToSearch):
+            if idx < len(words) and words[idx].startswith(wordToSearch):
                 return words[idx]
+            else:
+                if idx >= len(words):
+                    print(f"ERROR: trying to access index={idx} while: {words}")
     for word in words:
         if word.startswith(wordToSearch):
             idx = words.index(word)
@@ -24,9 +28,11 @@ def findWord(words, wordToSearch):
     return ""
 # taken with love from GStreamerLatencyPlotter implementation
 def readAndParse(filename):
     result = dict()
+    global idxCache
     with open(filename, "r") as latencyFile:
         lines = latencyFile.readlines()
         for line in lines:
@@ -35,12 +41,12 @@ def readAndParse(filename):
             words = line.split()
             if not words[len(words) - 1].startswith("ts="):
                 continue
             def findAndRemove(wordToSearch):
                 res = findWord(words, wordToSearch)
                 res = res[res.find(")") + 1:len(res) - 1]
                 return res
             name = findWord(words, "element=(string)")
             if name == "":
                 name = findWord(words, "src-element=(string)")
@@ -49,12 +55,15 @@ def readAndParse(filename):
             src = findAndRemove("src=(string)")
             name = name[name.find(")") + 1:len(name) - 1]
             if name not in result:
-                result[name] = {"latency":[], "ts":[]}
+                result[name] = {"latency": [], "ts": []}
             timeWord = findAndRemove("time=(guint64)")
             tsWord = findAndRemove("ts=(guint64)")
-            result[name]["latency"].append(int(timeWord)/1e6) # time=(guint64)=14
-            result[name]["ts"].append(int(tsWord)/1e9) # ts=(guint64)=12
+            result[name]["latency"].append(
+                int(timeWord)/1e6)  # time=(guint64)=14
+            result[name]["ts"].append(int(tsWord)/1e9)  # ts=(guint64)=12
+    # drop cache for future runs
+    idxCache = dict()
     return result
@@ -78,9 +87,10 @@ def getLatencyTable(filename):
         dt_max_latency[column] = dt
     df_dt_max = pd.Series(dt_max_latency)
-    resultDf = pd.concat([df_dt_max, max_latency, avg_latency, median_latency, std_latency], axis=1)
+    resultDf = pd.concat(
+        [df_dt_max, max_latency, avg_latency, median_latency, std_latency], axis=1)
     resultDf.columns = ['dTmax', 'max', 'avg', 'median', 'std']
     print(resultDf)
     return resultDf
-getLatencyTable("latency_traces-x264enc-kpop-test-10.log")
+# getLatencyTable("latency_traces-x264enc-kpop-test-10.log")
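To make the table shape concrete, a rough sketch of the aggregation with made-up numbers: readAndParse returns one latency/ts sample list per pipeline element, and getLatencyTable reduces each to summary statistics (the dTmax column is omitted in this sketch):

# Illustrative data only: per-element latency samples in milliseconds, as produced by readAndParse.
import pandas as pd

parsed = {
    "x264enc0":   {"latency": [2.1, 3.4, 2.8], "ts": [0.10, 0.15, 0.20]},
    "mpegtsmux0": {"latency": [0.4, 0.5, 0.3], "ts": [0.11, 0.16, 0.21]},
}
df = pd.DataFrame({name: pd.Series(v["latency"]) for name, v in parsed.items()})
summary = pd.concat([df.max(), df.mean(), df.median(), df.std()], axis=1)
summary.columns = ['max', 'avg', 'median', 'std']
print(summary)  # one row per pipeline element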

PyScripts/qa.py

@@ -4,6 +4,8 @@ import pandas as pd
 def run_psnr_check(original, encoded, video_info):
     out = ""
+    # bad practice, but idgaf
+    # -f rawvideo {video_info}
     options = f"-f rawvideo {video_info} -i {original} -i {encoded} -filter_complex psnr -f null /dev/null"
     with open("ffmpeg-log.txt", "w") as f:
         proc = subprocess.run(["ffmpeg", *options.split()], stdout=f, stderr=subprocess.STDOUT, text=True)
@@ -13,6 +15,9 @@ def run_psnr_check(original, encoded, video_info):
     return out
 def run_ssim_check(original, encoded, video_info):
+    # bad practice, but idgaf
+    # -f rawvideo {video_info}
+    # we don't need additional information with h264 encoded files
     options = f"-f rawvideo {video_info} -i {original} -i {encoded} -filter_complex ssim -f null /dev/null"
     with open("ffmpeg-log.txt", "w") as f:
         proc = subprocess.run(["ffmpeg", *options.split()], stdout=f, stderr=subprocess.STDOUT, text=True)
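Assuming run_quality_check forwards its arguments unchanged to run_psnr_check/run_ssim_check, the assembled ffmpeg call for the base-daVinci / x264enc case would expand to roughly:

ffmpeg -f rawvideo -video_size 1280x720 -framerate 59.94 -pixel_format yuv420p -color_range pc -i ./base-daVinci-stereo-left-10.yuv -i autotest-x264enc-baseline-test-base-daVinci.mp4 -filter_complex psnr -f null /dev/null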