[autotest] move autotest to separate repository
This commit is contained in:
parent dd6da41b0d
commit e6eaa57dca
3 .gitmodules vendored
@@ -1,3 +1,6 @@
[submodule "GStreamerLatencyPlotter"]
	path = GStreamerLatencyPlotter
	url = https://github.com/podborski/GStreamerLatencyPlotter.git
[submodule "gstAutotest"]
	path = gstAutotest
	url = https://vptyp.tech/git/vptyp/gstAutotest.git
17 PyScripts/extra.py
@@ -1,17 +0,0 @@
from functools import wraps


def log_args_decorator(func):
    """
    A decorator that logs the arguments passed to a function.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        arg_names = func.__code__.co_varnames[:func.__code__.co_argcount]
        pos_args = dict(zip(arg_names, args))
        all_args = {**pos_args, **kwargs}

        print(f"Calling function '{func.__name__}' with arguments: {all_args}")
        result = func(*args, **kwargs)
        print(f"Function '{func.__name__}' returned: {result}")
        return result
    return wrapper
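# Usage sketch (hypothetical example, not part of the original module):
#
#     @log_args_decorator
#     def add(a, b):
#         return a + b
#
#     add(2, b=3)
#     # -> Calling function 'add' with arguments: {'a': 2, 'b': 3}
#     # -> Function 'add' returned: 5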
@@ -1,285 +0,0 @@
#!/usr/bin/python
from itertools import product
import qa
from latencyParse import getLatencyTable
import os, stat, subprocess
import pandas as pd
from extra import log_args_decorator

options = {
    "x264enc": {
        "bitrate": ["10000", "20000", "5000"],
        "speed-preset": ["ultrafast", "fast", "medium"],
        "tune": ["zerolatency"],
        "sliced-threads": ["true", "false"],
        "b-adapt": ["true", "false"],
        "rc-lookahead": ["40", "0"],
        "ref": ["3", "0"]
    },
    "nvh264enc": {
        "bitrate": ["10000", "20000", "5000"],
        "preset": ["4", "5", "1"],
        "rc-lookahead": ["0"],
        "rc-mode": ["2", "0", "5"],
        "zerolatency": ["true", "false"],
    },
    "nvv4l2h264enc": {
        "bitrate": ["10000000", "20000000", "5000000"],
        "profile": ["0", "1", "2"],
        "preset-id": ["1", "2", "3"],
        "control-rate": ["1", "2"],
        "idrinterval": ["1", "256"],
        "tuning-info-id": ["4", "2", "3"]
    }
}

videos = {
    "base-daVinci": "./test.yuv"
}

testsource = "videotestsrc pattern=smpte"

videosrc = {
    "raw": ["filesrc location=", " ! rawvideoparse "],
    "h264": ["filesrc location=", " ! decodebin"]
}

psnr_check = {
    "x264enc": "-pixel_format yuv420p -color_range pc",
    "nvh264enc": "-pixel_format nv12 -color_range tv",
    "nvv4l2h264enc": "-pixel_format nv12 -color_range tv"
}

with_docker = ["nvv4l2h264enc"]

repeats = 3

formats = {
    "x264enc": "I420",
    "nvh264enc": "NV12",
    "nvv4l2h264enc": "NV12"
}

profiles = ["baseline", "main"]

videoconvert = {
    "nvv4l2h264enc": "nvvideoconvert",
    "nvh264enc": "videoconvert",
    "x264enc": "videoconvert"
}

video_info = {
    "video1": "-video_size 1920x1080 -framerate 23.98",
    "sample-surgery": "-video_size 1280x720 -framerate 29.97",
    "base-daVinci": "-video_size 1280x720 -framerate 59.94"
}

gst_video_info = {
    "video1": "format=I420,height=1080,width=1920,framerate=24000/1001",
    "base-daVinci": "format=2 height=720 width=1280 colorimetry=bt601 framerate=60000/1001"
}

latency_filename = "latency-traces-autotest.log"

# Step-by-step:
# 1. Generate all combinations for each encoder
# 2. For each combination, create a GStreamer pipeline string
# 3. Start each pipeline with latency tracing enabled
# 3.1 Monitor CPU, GPU and memory usage during each pipeline run (nah, later, maybe)
# 4. Start latency parsing script after each pipeline and store results in a pandas dataframe:
#    - two key columns: encoder name, parameters string
# 5. Run PSNR check after each pipeline and add results in the dataframe
# 6. Save dataframe to CSV file


class Pipeline:
    def __init__(self):
        self.pipeline = "gst-launch-1.0 -e "
        self.options = ""

    def add_tracing(self):
        self.pipeline = (
            "GST_DEBUG_COLOR_MODE=off " +
            "GST_TRACERS=\"latency(flags=pipeline+element)\" " +
            "GST_DEBUG=GST_TRACER:7 GST_DEBUG_FILE=" + latency_filename + " " +
            self.pipeline
        )
        return self

    def add_source(self, source):
        self.pipeline += source + " ! clocksync sync-to-first=true ! "
        return self

    def __add_tee(self, encoder):
        pass
        #self.pipeline += "tee name=t t. ! queue max-size-time=5000000000 max-size-bytes=100485760 max-size-buffers=1000 ! filesink location=\"base-autotest.yuv\" "

    def add_encoder(self, encoder, params):
        self.pipeline += videoconvert[encoder] + " ! "
        self.pipeline += "capsfilter caps=video/x-raw,format=" + formats[encoder] + " ! "
        #self.__add_tee(encoder)
        self.options += " ".join(params) + " "
        #self.pipeline += "t. ! queue max-size-time=5000000000 max-size-bytes=100485760 max-size-buffers=1000 ! "
        self.pipeline += encoder + " "
        self.pipeline += " ".join(params) + " "
        return self

    def add_profile(self, profile):
        self.pipeline += "! capsfilter caps=\"video/x-h264,profile=" + profile + "\" ! "
        self.options += "profile=" + profile + " "
        return self

    def to_file(self, filename):
        self.pipeline += "h264parse ! mpegtsmux ! filesink location=\"" + filename + "\""
        return self
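# Builder usage sketch (illustrative values; the resulting command line is
# approximate):
#
#     p = Pipeline().add_tracing() \
#         .add_source(testsource) \
#         .add_encoder("x264enc", ["bitrate=5000"]) \
#         .add_profile("main") \
#         .to_file("out.mp4")
#     p.pipeline ->
#     GST_DEBUG_COLOR_MODE=off GST_TRACERS="latency(flags=pipeline+element)"
#     GST_DEBUG=GST_TRACER:7 GST_DEBUG_FILE=latency-traces-autotest.log
#     gst-launch-1.0 -e videotestsrc pattern=smpte ! clocksync sync-to-first=true !
#     videoconvert ! capsfilter caps=video/x-raw,format=I420 ! x264enc bitrate=5000
#     ! capsfilter caps="video/x-h264,profile=main" ! h264parse ! mpegtsmux !
#     filesink location="out.mp4"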
def makeVideoSrc(videoName):
    return videosrc["raw"][0] + videos[videoName] + videosrc["raw"][1] + gst_video_info[videoName]
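# e.g. makeVideoSrc("base-daVinci") would produce, given the tables above:
# "filesrc location=./test.yuv ! rawvideoparse format=2 height=720 width=1280 colorimetry=bt601 framerate=60000/1001"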
def generateEncoderStrings():
    global options
    result = dict()
    for encoder, value in options.items():
        result[encoder] = generate_combinations(value)
    return result
def generate_combinations(config_dict):
    """
    Generate all combinations of values from a configuration dictionary.

    Args:
        config_dict (dict): Dictionary with parameter names as keys and lists of values as values

    Returns:
        list: List of strings containing all parameter combinations
    """
    combinations = []

    keys = list(config_dict.keys())
    value_lists = [config_dict[key] for key in keys]

    for combo in product(*value_lists):
        param_strings = []
        for key, value in zip(keys, combo):
            param_strings.append(f"{key}={value}")

        combinations.append(" ".join(param_strings))

    return combinations
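# A quick illustrative example (hypothetical input):
#
#     generate_combinations({"bitrate": ["5000", "10000"], "tune": ["zerolatency"]})
#     -> ["bitrate=5000 tune=zerolatency", "bitrate=10000 tune=zerolatency"]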
qualityDataframe = pd.DataFrame()
latencyDataframe = pd.DataFrame()
dockerRunString = "sudo -S docker container exec deepstream-gst bash"


def execPermissions(scriptFile="to_exec.sh"):
    current_permissions = os.stat(scriptFile).st_mode
    new_permissions = current_permissions | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    os.chmod(scriptFile, new_permissions)


def writeToExecFile(contents, file):
    with open(file, "w") as f:
        f.write(str(contents))
    execPermissions(file)
def is_docker(func):
    # If the pipeline uses an encoder that only runs inside the container,
    # write it to a script and re-route the call through docker exec.
    def wrapper(pipeline):
        script_name = "to_exec.sh"
        for encoder in with_docker:
            if encoder in pipeline:
                writeToExecFile(pipeline, script_name)
                pipeline = dockerRunString + f" {script_name}"
                break
        return func(pipeline)
    return wrapper
def is_sudo(pipeline):
    return pipeline.startswith("sudo")
def passwordAuth(proc):
    # sudo -S reads the password from stdin; it must be newline-terminated
    password = os.getenv("UAUTH")
    if password is not None:
        proc.communicate(password + "\n")
def printLog(file):
    with open(file, "r") as f:
        out = f.read()
    print(out)
@is_docker
@log_args_decorator
def run_pipeline(pipeline):
    logfile = "pipeline-log.txt"
    with open(logfile, "w") as f:
        proc = subprocess.Popen(pipeline, shell=True,
                                stdin=subprocess.PIPE, stdout=f,
                                stderr=subprocess.STDOUT, text=True)
        if is_sudo(pipeline):
            passwordAuth(proc)
        proc.wait()
    printLog(logfile)
    if proc.returncode != 0:
        raise Exception("Pipeline failed, see log for details")
def time_trace(func):
    def wrapper():
        import time
        start_time = time.time()
        func()
        end_time = time.time()
        elapsed_time = end_time - start_time
        print(f"Total execution time: {elapsed_time} seconds")
    return wrapper
@time_trace
def run_autotest():
    encoders = generateEncoderStrings()
    for encoder, combinations in encoders.items():
        qualityDataframe = pd.DataFrame()
        latencyDataframe = pd.DataFrame()
        for params in combinations:
            for profile in profiles:
                for videoName, videoPath in videos.items():
                    for _ in range(repeats):
                        filename = "autotest-" + encoder + "-" + profile + "-test-" + videoName + ".mp4"
                        pipeline = Pipeline()
                        pipeline = (
                            pipeline.add_tracing()
                            .add_source(makeVideoSrc(videoName))
                            .add_encoder(encoder, params.split(" "))
                            .add_profile(profile)
                            .to_file(filename)
                        )
                        print(pipeline.pipeline)
                        try:
                            run_pipeline(pipeline.pipeline)
                        except Exception as e:
                            print(f"Error occurred: {e}")
                            continue
                        psnr_metrics, ssim_metrics = qa.run_quality_check(
                            videoPath,
                            filename,
                            video_info[videoName] + " " + psnr_check[encoder]
                        )
                        dfPsnr = qa.parse_quality_report(psnr_metrics, ssim_metrics)
                        print("-----")
                        dfLatency = getLatencyTable(latency_filename)
                        columnsQ = pd.MultiIndex.from_tuples(
                            [(encoder, profile, videoName, params, col) for col in dfPsnr.columns]
                        )
                        columnsLatency = pd.MultiIndex.from_tuples(
                            [(encoder, profile, videoName, params, col) for col in dfLatency.columns]
                        )
                        dfPsnr.columns = columnsQ
                        dfLatency.columns = columnsLatency
                        qualityDataframe = pd.concat([qualityDataframe, dfPsnr], axis=1)
                        latencyDataframe = pd.concat([latencyDataframe, dfLatency], axis=1)
                        print("=====")
                        print("Current results:")
                        print(dfPsnr)
                        print(dfLatency)
        qualityDataframe.to_csv(f"qualityResults{encoder}.csv")
        latencyDataframe.to_csv(f"latencyDataframe{encoder}.csv")


run_autotest()
96 PyScripts/latencyParse.py
@@ -1,96 +0,0 @@
#!/usr/bin/python3
import pandas as pd
import numpy as np

# The idea: over a set of experiments we compute all latency information ->
# per-element avg, std and max. A total is not computed, because it would
# require additional parsing of parallel branches (from tee).
# Ideally we would write the data to a table.

idxCache = dict()


def findWord(words, wordToSearch):
    global idxCache
    if wordToSearch in idxCache:
        for idx in idxCache[wordToSearch]:
            if idx < len(words) and words[idx].startswith(wordToSearch):
                return words[idx]
            else:
                if idx >= len(words):
                    print(f"ERROR: trying to access index={idx} while: {words}")
    for word in words:
        if word.startswith(wordToSearch):
            idx = words.index(word)
            if wordToSearch not in idxCache:
                idxCache[wordToSearch] = []
            idxCache[wordToSearch].append(idx)
            return words[idx]
    return ""


# taken with love from GStreamerLatencyPlotter implementation


def readAndParse(filename):
    result = dict()
    global idxCache
    with open(filename, "r") as latencyFile:
        lines = latencyFile.readlines()
        for line in lines:
            if line.find("new format string") != -1:
                continue
            words = line.split()
            if not words or not words[len(words) - 1].startswith("ts="):
                continue

            def findAndRemove(wordToSearch):
                # strip the "(type)" prefix and the trailing separator from a
                # "key=(type)value" token
                res = findWord(words, wordToSearch)
                res = res[res.find(")") + 1:len(res) - 1]
                return res

            name = findWord(words, "element=(string)")
            if name == "":
                name = findWord(words, "src-element=(string)")
            if name == "":
                continue
            src = findAndRemove("src=(string)")
            name = name[name.find(")") + 1:len(name) - 1]
            if name not in result:
                result[name] = {"latency": [], "ts": []}

            timeWord = findAndRemove("time=(guint64)")
            tsWord = findAndRemove("ts=(guint64)")
            result[name]["latency"].append(
                int(timeWord) / 1e6)  # time=(guint64)=14
            result[name]["ts"].append(int(tsWord) / 1e9)  # ts=(guint64)=12
    # drop cache for future runs
    idxCache = dict()
    return result
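# For reference, the tracer lines being parsed look roughly like this
# (an illustrative sketch; the exact layout varies with the GStreamer version):
#
#     ... TRACE GST_TRACER :0:: element-latency, element=(string)x264enc0, \
#         src=(string)src, time=(guint64)1234567, ts=(guint64)987654321;
#
# time is the per-element latency in nanoseconds (stored above as ms) and
# ts is the nanosecond timestamp of the measurement (stored as seconds).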
def getLatencyTable(filename):
    parsed = readAndParse(filename)
    df = pd.DataFrame(parsed)
    print(df)
    latency_row = df.loc['latency']
    ts_list = df.loc['ts']

    avg_latency = latency_row.apply(np.mean)
    median_latency = latency_row.apply(np.median)
    max_latency = latency_row.apply(np.max)
    std_latency = latency_row.apply(np.std)
    dt_max_latency = dict()
    min_timestamp = ts_list.apply(np.min)

    # for each element: offset of its worst-latency sample from the earliest
    # timestamp seen in the trace
    for column in df.columns:
        max_index = np.argmax(latency_row[column])
        dt = ts_list[column][max_index] - min_timestamp.min()
        dt_max_latency[column] = dt

    df_dt_max = pd.Series(dt_max_latency)
    resultDf = pd.concat(
        [df_dt_max, max_latency, avg_latency, median_latency, std_latency], axis=1)
    resultDf.columns = ['dTmax', 'max', 'avg', 'median', 'std']
    print(resultDf)
    return resultDf


# getLatencyTable("latency_traces-x264enc-kpop-test-10.log")
118 PyScripts/qa.py
@@ -1,118 +0,0 @@
#!/usr/bin/python3
import subprocess
import pandas as pd


def run_psnr_check(original, encoded, video_info):
    out = ""
    # bad practice, but idgaf
    # -f rawvideo {video_info}
    options = f"-f rawvideo {video_info} -i {original} -i {encoded} -filter_complex psnr -f null /dev/null"
    with open("ffmpeg-log.txt", "w") as f:
        proc = subprocess.run(["ffmpeg", *options.split()], stdout=f, stderr=subprocess.STDOUT, text=True)
        print(f"Return code: {proc.returncode}")
    with open("ffmpeg-log.txt", "r") as f:
        out = f.read()
    return out


def run_ssim_check(original, encoded, video_info):
    # bad practice, but idgaf
    # -f rawvideo {video_info}
    # we don't need additional information with h264 encoded files
    options = f"-f rawvideo {video_info} -i {original} -i {encoded} -filter_complex ssim -f null /dev/null"
    with open("ffmpeg-log.txt", "w") as f:
        proc = subprocess.run(["ffmpeg", *options.split()], stdout=f, stderr=subprocess.STDOUT, text=True)
        print(f"Return code: {proc.returncode}")
    with open("ffmpeg-log.txt", "r") as f:
        out = f.read()
    return out


def parse_psnr_output(output):
    for line in output.splitlines():
        if "[Parsed_psnr" in line and "PSNR" in line:
            parts = line.split()
            y = parts[4].split(":")[1]
            u = parts[5].split(":")[1]
            v = parts[6].split(":")[1]
            avg = parts[7].split(":")[1]
            minYUV = parts[8].split(":")[1]
            maxYUV = parts[9].split(":")[1]
            return {
                "Y": y,
                "U": u,
                "V": v,
                "Average": avg,
                "MinYUV": minYUV,
                "MaxYUV": maxYUV
            }
    return {}
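# Expects a summary line of roughly this shape from ffmpeg's psnr filter
# (illustrative values; the token positions match the indices used above):
#
#     [Parsed_psnr_0 @ 0x...] PSNR y:42.11 u:45.02 v:44.80 average:43.01 min:39.50 max:48.20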
def parse_ssim_output(output):
    for line in output.splitlines():
        if "[Parsed_ssim" in line and "SSIM" in line:
            parts = line.split()
            all_value = parts[10].split(":")[1]
            y = parts[4].split(":")[1]
            u = parts[6].split(":")[1]
            v = parts[8].split(":")[1]
            return {
                "Y": y,
                "U": u,
                "V": v,
                "Average": all_value
            }
    return {}
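# Expects a summary line of roughly this shape from ffmpeg's ssim filter
# (illustrative values; note the interleaved "(dB)" tokens, which is why the
# indices above are 4, 6, 8 and 10):
#
#     [Parsed_ssim_0 @ 0x...] SSIM Y:0.982 (17.5) U:0.975 (16.0) V:0.973 (15.7) All:0.979 (16.8)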
def run_quality_check(original, encoded, option):
    psnr_result = run_psnr_check(original, encoded, option)
    ssim_result = run_ssim_check(original, encoded, option)
    psnr_metrics = parse_psnr_output(psnr_result)
    ssim_metrics = parse_ssim_output(ssim_result)
    print("PSNR Metrics:", psnr_metrics)
    print("SSIM Metrics:", ssim_metrics)
    return psnr_metrics, ssim_metrics


def parse_quality_report(psnr_metrics, ssim_metrics):
    psnrSeries = pd.Series(psnr_metrics)
    ssimSeries = pd.Series(ssim_metrics)
    combined = pd.concat([psnrSeries, ssimSeries], axis=1)
    combined.columns = ["PSNR", "SSIM"]
    combined = combined.fillna(0)
    return combined
# psnr, ssim = run_quality_check(
#     "base-x264enc-kpop-test-10.yuv",
#     "encoded-x264enc-kpop-test-10.mp4",
#     "-pixel_format yuv420p -color_range tv -video_size 1920x1080 -framerate 23.98 "
# )

# combined = parse_quality_report(
#     psnr,
#     ssim
# )

# encoder = "x264enc"
# profile = "main"
# params = "bitrate=5000"

# columns = pd.MultiIndex.from_tuples(
#     [(encoder, profile, params, col) for col in combined.columns]
# )

# combined.columns = columns

# main_df = combined
# profile = "baseline"

# combined2 = parse_quality_report(
#     psnr,
#     ssim
# )
# columns = pd.MultiIndex.from_tuples(
#     [(encoder, profile, params, col) for col in combined2.columns]
# )
# combined2.columns = columns
# main_df = pd.concat([main_df, combined2], axis=1)
# print(main_df)

# main_df.to_csv("quality_report.csv")
1 gstAutotest Submodule
@@ -0,0 +1 @@
Subproject commit 7bff99a6c77449c5bcbe8421cb94e5bdd8022c97