diff --git a/PyScripts/gstreamerAutotest.py b/PyScripts/gstreamerAutotest.py
index f9355bc..f7b3564 100644
--- a/PyScripts/gstreamerAutotest.py
+++ b/PyScripts/gstreamerAutotest.py
@@ -1,32 +1,35 @@
 #!/usr/bin/python
 from itertools import product
+import qa
+from latencyParse import getLatencyTable
 
 options = {
     "x264enc": {
-        "bitrate" : ["10000", "20000", "5000"],
-        "speed-preset" : ["ultrafast", "fast", "medium"],
+        "bitrate": ["10000", "20000", "5000"],
+        "speed-preset": ["ultrafast", "fast", "medium"],
         "tune": ["zerolatency"],
         "slices-threads": ["true", "false"],
         "b-adapt": ["true", "false"],
         "rc-lookahead": ["40", "0"],
         "ref": ["3", "0"]
-
-    },
-    "nvh264enc" : {
-        "bitrate" : ["10000", "20000", "5000"],
-        "preset" : ["4", "5", "1"],
-        "rc-lookahead" : ["0"],
-        "rc-mode" : ["2", "0", "5"],
-        "zerolatency": ["true", "false"],
     },
-    "nvv4l2h264enc": {
-        "bitrate" : ["10000000", "20000000", "5000000"],
-        "profile": ["0", "1", "2"],
-        "preset-id": ["1", "2", "3"],
-        "control-id": ["1", "2"],
-        "tuning-info-id": ["4", "2"]
+    "nvh264enc": {
+        "bitrate": ["10000", "20000", "5000"],
+        "preset": ["4", "5", "1"],
+        "rc-lookahead": ["0"],
+        "rc-mode": ["2", "0", "5"],
+        "zerolatency": ["true", "false"],
+    }
+    # ,
+    # "nvv4l2h264enc": {
+    #     "bitrate": ["10000000", "20000000", "5000000"],
+    #     "profile": ["0", "1", "2"],
+    #     "preset-id": ["1", "2", "3"],
+    #     "control-id": ["1", "2"],
+    #     "tuning-info-id": ["4", "2"]
+    # }
 }
 
 videos = [""]
@@ -41,52 +44,173 @@ psnr_check = {
 }
 
 formats = {
-    "x264enc" : "I420",
-    "nvh264enc" : "NV12",
+    "x264enc": "I420",
+    "nvh264enc": "NV12",
     "nvv4l2h264enc": "I420"
 }
 
 profiles = ["baseline", "main"]
 
+video_info = {
+    "video1":"-video_size 1920x1080 -framerate 23.98"
+}
+
+latency_filename = "latency-traces-autotest.log"
+
+class Pipeline:
+    def __init__(self):
+        self.pipeline = "gst-launch-1.0 -e "
+        self.options = ""
+
+    def add_tracing(self):
+        self.pipeline = (
+            "GST_DEBUG_COLOR_MODE=off "
+            + "GST_TRACERS=\"latency(flags=pipeline+element)\" "
+            + "GST_DEBUG=GST_TRACER:7 GST_DEBUG_FILE=" + latency_filename + " "
+            + self.pipeline
+        )
+        return self
+
+    def add_source(self, source):
+        self.pipeline += source + " ! videoconvert ! "
+        return self
+
+    def __add_tee(self, encoder):
+        self.pipeline += "capsfilter caps=video/x-raw,format=" + formats[encoder] + " ! "
+        self.pipeline += "tee name=t t. ! queue ! filesink location=\"base-autotest.yuv\" "
+
+    def add_encoder(self, encoder, params):
+        self.__add_tee(encoder)
+        self.options += " ".join(params) + " "
+        self.pipeline += "t. ! queue ! "
+        self.pipeline += encoder + " "
+        self.pipeline += " ".join(params) + " "
+        return self
+
+    def add_profile(self, profile):
+        self.pipeline += "capsfilter caps=\"video/x-h264,profile=" + profile + "\" ! "
+        self.options += "profile=" + profile + " "
+        return self
+
+    def to_file(self, filename):
+        self.pipeline += "h264parse ! mp4mux ! filesink location=\"" + filename + "\""
+        return self
+
+
 def makeVideoSrc(idx):
     return videosrc[0] + videos[idx] + videosrc[1]
 
+
 def generateEncoderStrings():
     global options
     result = dict()
     for encoder, value in options.items():
         result[encoder] = generate_combinations(value)
     return result
-    
+
+
 def generate_combinations(config_dict):
     """
     Generate all combinations of values from a configuration dictionary.
-    
+
     Args:
         config_dict (dict): Dictionary with parameter names as keys and lists of values as values
-    
+
     Returns:
         list: List of strings containing all parameter combinations
     """
     combinations = []
-    
-    # Get the keys and values in consistent order
+
     keys = list(config_dict.keys())
     value_lists = [config_dict[key] for key in keys]
-    
-    # Generate all combinations using itertools.product
+
     for combo in product(*value_lists):
-        # Create a list of key=value strings
         param_strings = []
         for key, value in zip(keys, combo):
             param_strings.append(f"{key}={value}")
-        
-        # Join all parameter strings with space separator
+
         combinations.append(" ".join(param_strings))
-    
+
     return combinations
 
-def generateRecordString(options, ):
-    pass
+# Step-by-step:
+# 1. Generate all combinations for each encoder
+# 2. For each combination, create a GStreamer pipeline string
+# 3. Start each pipeline with latency tracing enabled
+# 3.1 Monitor CPU, GPU and memory usage during each pipeline run (nah, later, maybe)
+# 4. Start latency parsing script after each pipeline and store results in a pandas dataframe:
+#    - two key columns: encoder name, parameters string
+# 5. Run PSNR check after each pipeline and add results in the dataframe
+# 6. Save dataframe to CSV file
+import pandas as pd
 
-print(len(generateEncoderStrings()[""]))
\ No newline at end of file
+qualityDataframe = pd.DataFrame()
+latencyDataframe = pd.DataFrame()
+
+def run_pipeline(pipeline):
+    import subprocess
+    print("Running pipeline:")
+    print(pipeline)
+    with open("pipeline-log.txt", "w") as f:
+        proc = subprocess.run(pipeline, shell=True, stdout=f, stderr=subprocess.STDOUT, text=True)
+    print(f"Pipeline finished with return code: {proc.returncode}")
+    with open("pipeline-log.txt", "r") as f:
+        out = f.read()
+        print(out)
+    if proc.returncode != 0:
+        raise Exception("Pipeline failed, see log for details")
+
+def run_autotest():
+    global qualityDataframe, latencyDataframe
+    encoders = generateEncoderStrings()
+    for encoder, combinations in encoders.items():
+        for params in combinations:
+            for profile in profiles:
+                for idx in range(len(videos)):
+                    filename = "autotest-" + encoder + "-" + profile + "-test-" + str(idx) + ".mp4"
+                    pipeline = Pipeline()
+                    pipeline = (
+                        pipeline.add_tracing()
+                        .add_source(makeVideoSrc(idx))
+                        .add_encoder(encoder, params.split(" "))
+                        .add_profile(profile)
+                        .to_file(filename)
+                    )
+                    print(pipeline.pipeline)
+                    try:
+                        run_pipeline(pipeline.pipeline)
+                    except Exception as e:
+                        print(f"Error occurred: {e}")
+                        continue
+                    psnr_metrics, ssim_metrics = qa.run_quality_check(
+                        videos[idx],
+                        filename,
+                        video_info[videos[idx]] + " " + psnr_check[encoder]
+                    )
+                    dfPsnr = qa.parse_quality_report(psnr_metrics, ssim_metrics)
+                    print("-----")
+                    dfLatency = getLatencyTable(latency_filename)
+                    columnsQ = pd.MultiIndex.from_tuples(
+                        [(encoder, profile, params, col) for col in dfPsnr.columns]
+                    )
+                    columnsLatency = pd.MultiIndex.from_tuples(
+                        [(encoder, profile, params, col) for col in dfLatency.columns]
+                    )
+                    dfPsnr.columns = columnsQ
+                    dfLatency.columns = columnsLatency
+                    qualityDataframe = pd.concat([qualityDataframe, dfPsnr], axis=1)
+                    latencyDataframe = pd.concat([latencyDataframe, dfLatency], axis=1)
+                    print("=====")
+                    print("Current results:")
+                    print(dfPsnr)
+                    print(dfLatency)
+
+def run_timetracer():
+    import time
+    start_time = time.time()
+    run_autotest()
+    end_time = time.time()
+    elapsed_time = end_time - start_time
+    print(f"Total execution time: {elapsed_time} seconds")
+
+run_timetracer()
\ No newline at end of file
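A quick sanity check of the parameter expansion implemented by generate_combinations above (a minimal sketch, not part of the patch; the sample dict is a made-up two-key subset of the real encoder options):

    # Each combination is the cross product of the value lists, rendered as
    # one "key=value key=value ..." string, mirroring generate_combinations.
    from itertools import product

    sample = {"bitrate": ["10000", "5000"], "tune": ["zerolatency"]}
    keys = list(sample.keys())
    combos = [" ".join(f"{k}={v}" for k, v in zip(keys, combo))
              for combo in product(*(sample[k] for k in keys))]
    print(combos)
    # ['bitrate=10000 tune=zerolatency', 'bitrate=5000 tune=zerolatency']

These strings are what run_autotest later splits on spaces and passes to Pipeline.add_encoder.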
diff --git a/PyScripts/latencyParse.py b/PyScripts/latencyParse.py
index 0db57e5..77aeb13 100644
--- a/PyScripts/latencyParse.py
+++ b/PyScripts/latencyParse.py
@@ -51,10 +51,10 @@ def readAndParse(filename):
         if name not in result:
             result[name] = {"latency":[], "ts":[]}
 
-        timeWord = findWord(words, "time=(guint64)")
-        tsWord = findWord(words, "ts=(guint64)")
-        result[name]["latency"].append(int(timeWord[14:len(timeWord) - 1])/1e6) # time=(guint64)=14
-        result[name]["ts"].append(int(tsWord[12:len(tsWord) - 1])/1e9) # ts=(guint64)=12
+        timeWord = findAndRemove("time=(guint64)")
+        tsWord = findAndRemove("ts=(guint64)")
+        result[name]["latency"].append(int(timeWord)/1e6) # time=(guint64)=14
+        result[name]["ts"].append(int(tsWord)/1e9) # ts=(guint64)=12
 
     return result
 
@@ -81,5 +81,6 @@ def getLatencyTable(filename):
     resultDf = pd.concat([df_dt_max, max_latency, avg_latency, median_latency, std_latency], axis=1)
     resultDf.columns = ['dTmax', 'max', 'avg', 'median', 'std']
     print(resultDf)
+    return resultDf
 
-getLatencyTable("latency_traces-x264enc-big-pr-main.log")
\ No newline at end of file
+getLatencyTable("latency_traces-x264enc-kpop-test-10.log")
\ No newline at end of file
diff --git a/PyScripts/qa.py b/PyScripts/qa.py
new file mode 100644
index 0000000..e686a4c
--- /dev/null
+++ b/PyScripts/qa.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python3
+import subprocess
+import pandas as pd
+
+def run_psnr_check(original, encoded, video_info):
+    out = ""
+    options = f"-f rawvideo {video_info} -i {original} -i {encoded} -filter_complex psnr -f null /dev/null"
+    with open("ffmpeg-log.txt", "w") as f:
+        proc = subprocess.run(["ffmpeg", *options.split()], stdout=f, stderr=subprocess.STDOUT, text=True)
+    print(f"Return code: {proc.returncode}")
+    with open("ffmpeg-log.txt", "r") as f:
+        out = f.read()
+    return out
+
+def run_ssim_check(original, encoded, video_info):
+    options = f"-f rawvideo {video_info} -i {original} -i {encoded} -filter_complex ssim -f null /dev/null"
+    with open("ffmpeg-log.txt", "w") as f:
+        proc = subprocess.run(["ffmpeg", *options.split()], stdout=f, stderr=subprocess.STDOUT, text=True)
+    print(f"Return code: {proc.returncode}")
+    with open("ffmpeg-log.txt", "r") as f:
+        out = f.read()
+    return out
+
+def parse_psnr_output(output):
+    for line in output.splitlines():
+        if "[Parsed_psnr" in line and "PSNR" in line:
+            parts = line.split()
+            y = parts[4].split(":")[1]
+            u = parts[5].split(":")[1]
+            v = parts[6].split(":")[1]
+            avg = parts[7].split(":")[1]
+            minYUV = parts[8].split(":")[1]
+            maxYUV = parts[9].split(":")[1]
+            return {
+                "Y": y,
+                "U": u,
+                "V": v,
+                "Average": avg,
+                "MinYUV": minYUV,
+                "MaxYUV": maxYUV
+            }
+    return {}
+
+def parse_ssim_output(output):
+    for line in output.splitlines():
+        if "[Parsed_ssim" in line and "SSIM" in line:
+            parts = line.split()
+            all_value = parts[10].split(":")[1]
+            y = parts[4].split(":")[1]
+            u = parts[6].split(":")[1]
+            v = parts[8].split(":")[1]
+            return {
+                "Y": y,
+                "U": u,
+                "V": v,
+                "Average": all_value
+            }
+    return {}
+
+def run_quality_check(original, encoded, option):
+    psnr_result = run_psnr_check(original, encoded, option)
+    ssim_result = run_ssim_check(original, encoded, option)
+    psnr_metrics = parse_psnr_output(psnr_result)
+    ssim_metrics = parse_ssim_output(ssim_result)
+    print("PSNR Metrics:", psnr_metrics)
+    print("SSIM Metrics:", ssim_metrics)
+    return psnr_metrics, ssim_metrics
+
+def parse_quality_report(psnr_metrics, ssim_metrics):
+    psnrSeries = pd.Series(psnr_metrics)
+    ssimSeries = pd.Series(ssim_metrics)
+    combined = pd.concat([psnrSeries, ssimSeries], axis=1)
+    combined.columns = ["PSNR", "SSIM"]
+    combined = combined.fillna(0)
+    return combined
+
+# psnr, ssim = run_quality_check(
+#     "base-x264enc-kpop-test-10.yuv",
+#     "encoded-x264enc-kpop-test-10.mp4",
+#     "-pixel_format yuv420p -color_range tv -video_size 1920x1080 -framerate 23.98 "
+# )
+
+# combined = parse_quality_report(
+#     psnr,
+#     ssim
+# )
+
+# encoder = "x264enc"
+# profile = "main"
+# params = "bitrate=5000"
+
+# columns = pd.MultiIndex.from_tuples(
+#     [(encoder, profile, params, col) for col in combined.columns]
+# )
+
+# combined.columns = columns
+
+# main_df = combined
+# profile = "baseline"
+
+# combined2 = parse_quality_report(
+#     psnr,
+#     ssim
+# )
+# columns = pd.MultiIndex.from_tuples(
+#     [(encoder, profile, params, col) for col in combined2.columns]
+# )
+# combined2.columns = columns
+# main_df = pd.concat([main_df, combined2], axis=1)
+# print(main_df)
+
+# main_df.to_csv("quality_report.csv")
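Step 6 of the plan in gstreamerAutotest.py ("Save dataframe to CSV file") is not yet wired into run_autotest. A minimal sketch of one way to finish it, assuming the module-level qualityDataframe and latencyDataframe are reused as-is (the helper name and output file names below are made up, not from the patch):

    # Hypothetical helper, not part of this patch: persist the accumulated
    # MultiIndex result frames once run_autotest() has finished.
    def save_reports(quality_df, latency_df):
        quality_df.to_csv("quality-autotest.csv")
        latency_df.to_csv("latency-autotest.csv")

    # e.g. called at the end of run_timetracer():
    # save_reports(qualityDataframe, latencyDataframe)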